Merge changes from topic "audio-v6"
* changes:
Allow device to use Audio HAL V6
Introduce Audio V6
Script to generate new audio HAL version
Change some formatting for better script parsing
Convert audio HAL service mk to bp and rename the service
diff --git a/Android.bp b/Android.bp
index dd84737..70e0574 100644
--- a/Android.bp
+++ b/Android.bp
@@ -45,4 +45,5 @@
"-g",
],
+ require_root: true,
}
diff --git a/audio/4.0/config/api/current.txt b/audio/4.0/config/api/current.txt
index d59cade..3462568 100644
--- a/audio/4.0/config/api/current.txt
+++ b/audio/4.0/config/api/current.txt
@@ -114,11 +114,15 @@
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_EVRCNW;
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_EVRCWB;
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_E_AC3;
+ enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_E_AC3_JOC;
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_FLAC;
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_HE_AAC_V1;
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_HE_AAC_V2;
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_IEC61937;
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_LDAC;
+ enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_MAT_1_0;
+ enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_MAT_2_0;
+ enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_MAT_2_1;
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_MP2;
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_MP3;
enum_constant public static final audio.policy.configuration.V4_0.AudioFormat AUDIO_FORMAT_OPUS;
diff --git a/audio/4.0/config/audio_policy_configuration.xsd b/audio/4.0/config/audio_policy_configuration.xsd
index 58bab22..f26e41b 100644
--- a/audio/4.0/config/audio_policy_configuration.xsd
+++ b/audio/4.0/config/audio_policy_configuration.xsd
@@ -357,6 +357,10 @@
<xs:enumeration value="AUDIO_FORMAT_APTX_HD"/>
<xs:enumeration value="AUDIO_FORMAT_AC4"/>
<xs:enumeration value="AUDIO_FORMAT_LDAC"/>
+ <xs:enumeration value="AUDIO_FORMAT_E_AC3_JOC"/>
+ <xs:enumeration value="AUDIO_FORMAT_MAT_1_0"/>
+ <xs:enumeration value="AUDIO_FORMAT_MAT_2_0"/>
+ <xs:enumeration value="AUDIO_FORMAT_MAT_2_1"/>
</xs:restriction>
</xs:simpleType>
<xs:simpleType name="extendableAudioFormat">
diff --git a/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp b/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
index 022f75e..2703b2b 100644
--- a/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
+++ b/audio/core/all-versions/vts/functional/4.0/AudioPrimaryHidlHalTest.cpp
@@ -29,18 +29,21 @@
TEST_F(AudioHidlTest, OpenPrimaryDeviceUsingGetDevice) {
doc::test("Calling openDevice(\"primary\") should return the primary device.");
- {
- Result result;
- sp<IDevice> baseDevice;
- ASSERT_OK(devicesFactory->openDevice("primary", returnIn(result, baseDevice)));
- ASSERT_OK(result);
- ASSERT_TRUE(baseDevice != nullptr);
+ struct WaitExecutor {
+ ~WaitExecutor() { waitForDeviceDestruction(); }
+ } waitExecutor; // Make sure we wait for the device destruction when exiting the test.
+ Result result;
+ sp<IDevice> baseDevice;
+ ASSERT_OK(devicesFactory->openDevice("primary", returnIn(result, baseDevice)));
+ if (result != Result::OK && isPrimaryDeviceOptional()) {
+ GTEST_SKIP() << "No primary device on this factory"; // returns
+ }
+ ASSERT_OK(result);
+ ASSERT_TRUE(baseDevice != nullptr);
- Return<sp<IPrimaryDevice>> primaryDevice = IPrimaryDevice::castFrom(baseDevice);
- ASSERT_TRUE(primaryDevice.isOk());
- ASSERT_TRUE(sp<IPrimaryDevice>(primaryDevice) != nullptr);
- } // Destroy local IDevice proxy
- waitForDeviceDestruction();
+ Return<sp<IPrimaryDevice>> primaryDevice = IPrimaryDevice::castFrom(baseDevice);
+ ASSERT_TRUE(primaryDevice.isOk());
+ ASSERT_TRUE(sp<IPrimaryDevice>(primaryDevice) != nullptr);
}
//////////////////////////////////////////////////////////////////////////////
@@ -117,6 +120,10 @@
ASSERT_NE(0U, activeMicrophones.size());
}
stream->close();
+ // Workaround for b/139329877. Ensures the stream gets closed on the audio hal side.
+ stream.clear();
+ IPCThreadState::self()->flushCommands();
+ usleep(1000);
if (efGroup) {
EventFlag::deleteEventFlag(&efGroup);
}
@@ -148,6 +155,7 @@
// initial state. To workaround this, destroy the HAL at the end of this test.
device.clear();
waitForDeviceDestruction();
+ ASSERT_NO_FATAL_FAILURE(initPrimaryDevice());
}
static void testGetDevices(IStream* stream, AudioDevice expectedDevice) {
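Note on the change above: the WaitExecutor is a small RAII guard. Because GTEST_SKIP() returns from the test body early, the call to waitForDeviceDestruction() is moved into a destructor so it runs on every exit path (pass, failure, or skip). A minimal, self-contained sketch of the same idiom, with illustrative names only:

    #include <gtest/gtest.h>

    // Stand-in for waitForDeviceDestruction() in the test above.
    static void waitForCleanup() { /* block until the shared resource is released */ }

    TEST(RaiiGuardExample, CleanupRunsOnEveryExitPath) {
        struct Guard {
            ~Guard() { waitForCleanup(); }  // runs on pass, failure, or skip
        } guard;

        const bool resourceIsOptional = true;  // assumption for the sketch
        if (resourceIsOptional) {
            GTEST_SKIP() << "Nothing to test";  // early return; ~Guard still executes
        }
        // ... exercise the resource here ...
    }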
diff --git a/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h b/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
index 7f8b6a0..fb96323 100644
--- a/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
+++ b/audio/core/all-versions/vts/functional/AudioPrimaryHidlHalTest.h
@@ -215,25 +215,32 @@
// Test all audio devices
class AudioHidlTest : public AudioPolicyConfigTest {
public:
- void SetUp() override {
- ASSERT_NO_FATAL_FAILURE(HidlTest::SetUp()); // setup base
+ static void SetUpTestSuite() {
+ devicesFactory = ::testing::VtsHalHidlTargetTestBase::getService<IDevicesFactory>(
+ environment->getServiceName<IDevicesFactory>());
+ }
- if (devicesFactory == nullptr) {
- environment->registerTearDown([] { devicesFactory.clear(); });
- devicesFactory = ::testing::VtsHalHidlTargetTestBase::getService<IDevicesFactory>(
- environment->getServiceName<IDevicesFactory>("default"));
- }
- ASSERT_TRUE(devicesFactory != nullptr);
- }
+ static void TearDownTestSuite() { devicesFactory.clear(); }
+
+ void SetUp() override {
+ ASSERT_NO_FATAL_FAILURE(AudioPolicyConfigTest::SetUp()); // setup base
+ // Failures during SetUpTestSuite do not cause test termination.
+ ASSERT_TRUE(devicesFactory != nullptr);
+ }
protected:
// Cache the devicesFactory retrieval to speed up each test by ~0.5s
static sp<IDevicesFactory> devicesFactory;
+
+ static bool isPrimaryDeviceOptional() {
+ // It's OK not to have "primary" device on non-default audio HAL service.
+ return environment->getServiceName<IDevicesFactory>() != kDefaultServiceName;
+ }
};
sp<IDevicesFactory> AudioHidlTest::devicesFactory;
TEST_F(AudioHidlTest, GetAudioDevicesFactoryService) {
- doc::test("Test the getService (called in SetUp)");
+ doc::test("Test the getService");
}
TEST_F(AudioHidlTest, OpenDeviceInvalidParameter) {
@@ -257,23 +264,31 @@
// Test the primary device
class AudioPrimaryHidlTest : public AudioHidlTest {
public:
- /** Primary HAL test are NOT thread safe. */
+ static void SetUpTestSuite() {
+ ASSERT_NO_FATAL_FAILURE(AudioHidlTest::SetUpTestSuite());
+ ASSERT_NO_FATAL_FAILURE(initPrimaryDevice());
+ }
+
+ static void TearDownTestSuite() {
+ device.clear();
+ AudioHidlTest::TearDownTestSuite();
+ }
+
void SetUp() override {
ASSERT_NO_FATAL_FAILURE(AudioHidlTest::SetUp()); // setup base
-
- if (device == nullptr) {
- initPrimaryDevice();
- ASSERT_TRUE(device != nullptr);
- environment->registerTearDown([] { device.clear(); });
+ if (device == nullptr && isPrimaryDeviceOptional()) {
+ GTEST_SKIP() << "No primary device on this factory";
}
+ ASSERT_TRUE(device != nullptr);
}
protected:
// Cache the device opening to speed up each test by ~0.5s
static sp<IPrimaryDevice> device;
- private:
- void initPrimaryDevice() {
+ static void initPrimaryDevice() {
+ // Failures during test suite set up do not cause test termination.
+ ASSERT_TRUE(devicesFactory != nullptr);
Result result;
#if MAJOR_VERSION == 2
sp<IDevice> baseDevice;
@@ -292,7 +307,7 @@
sp<IPrimaryDevice> AudioPrimaryHidlTest::device;
TEST_F(AudioPrimaryHidlTest, OpenPrimaryDevice) {
- doc::test("Test the openDevice (called in SetUp)");
+ doc::test("Test the openDevice (called during setup)");
}
TEST_F(AudioPrimaryHidlTest, Init) {
@@ -692,6 +707,7 @@
if (open) {
ASSERT_OK(stream->close());
}
+ AudioConfigPrimaryTest::TearDown();
}
protected:
@@ -704,8 +720,9 @@
////////////////////////////// openOutputStream //////////////////////////////
class OutputStreamTest : public OpenStreamTest<IStreamOut> {
- virtual void SetUp() override {
+ void SetUp() override {
ASSERT_NO_FATAL_FAILURE(OpenStreamTest::SetUp()); // setup base
+ if (IsSkipped()) return; // do not attempt to use 'device'
address.device = AudioDevice::OUT_DEFAULT;
const AudioConfig& config = GetParam();
// TODO: test all flag combination
@@ -752,8 +769,9 @@
////////////////////////////// openInputStream //////////////////////////////
class InputStreamTest : public OpenStreamTest<IStreamIn> {
- virtual void SetUp() override {
+ void SetUp() override {
ASSERT_NO_FATAL_FAILURE(OpenStreamTest::SetUp()); // setup base
+ if (IsSkipped()) return; // do not attempt to use 'device'
address.device = AudioDevice::IN_DEFAULT;
const AudioConfig& config = GetParam();
// TODO: test all supported flags and source
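The fixture changes above move service and device acquisition from per-test SetUp() into suite-level SetUpTestSuite()/TearDownTestSuite(), and use GTEST_SKIP() when a non-default factory legitimately has no primary device. A rough sketch of that caching pattern, with a hypothetical service type standing in for the real IDevicesFactory plumbing:

    #include <gtest/gtest.h>
    #include <memory>

    struct FakeService { bool hasPrimary = false; };  // stand-in for the HAL proxy

    class SuiteCachedTest : public ::testing::Test {
      protected:
        static void SetUpTestSuite() { service = std::make_shared<FakeService>(); }
        static void TearDownTestSuite() { service.reset(); }

        void SetUp() override {
            // Failures inside SetUpTestSuite do not abort the tests, so re-check here.
            ASSERT_NE(service, nullptr);
            if (!service->hasPrimary) GTEST_SKIP() << "No primary device on this factory";
        }

        static std::shared_ptr<FakeService> service;  // shared by every test in the suite
    };
    std::shared_ptr<FakeService> SuiteCachedTest::service;

    TEST_F(SuiteCachedTest, UsesCachedService) { /* 'service' is valid unless skipped */ }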
diff --git a/authsecret/1.0/vts/functional/Android.bp b/authsecret/1.0/vts/functional/Android.bp
index f2b3a8a..9ce9cda 100644
--- a/authsecret/1.0/vts/functional/Android.bp
+++ b/authsecret/1.0/vts/functional/Android.bp
@@ -19,5 +19,9 @@
defaults: ["VtsHalTargetTestDefaults"],
srcs: ["VtsHalAuthSecretV1_0TargetTest.cpp"],
static_libs: ["android.hardware.authsecret@1.0"],
- test_suites: ["general-tests"],
+ test_suites: [
+ "general-tests",
+ "vts-core",
+ ],
+ require_root: true,
}
diff --git a/authsecret/1.0/vts/functional/VtsHalAuthSecretV1_0TargetTest.cpp b/authsecret/1.0/vts/functional/VtsHalAuthSecretV1_0TargetTest.cpp
index 255d4de..0bff9df 100644
--- a/authsecret/1.0/vts/functional/VtsHalAuthSecretV1_0TargetTest.cpp
+++ b/authsecret/1.0/vts/functional/VtsHalAuthSecretV1_0TargetTest.cpp
@@ -16,36 +16,22 @@
#include <android/hardware/authsecret/1.0/IAuthSecret.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
using ::android::hardware::hidl_vec;
using ::android::hardware::authsecret::V1_0::IAuthSecret;
using ::android::sp;
-// Test environment for Boot HIDL HAL.
-class AuthSecretHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static AuthSecretHidlEnvironment* Instance() {
- static AuthSecretHidlEnvironment* instance = new AuthSecretHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<IAuthSecret>(); }
-
- private:
- AuthSecretHidlEnvironment() {}
-};
-
/**
* There is no expected behaviour that can be tested so these tests check the
* HAL doesn't crash with different execution orders.
*/
-struct AuthSecretHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class AuthSecretHidlTest : public testing::TestWithParam<std::string> {
+ public:
virtual void SetUp() override {
- authsecret = ::testing::VtsHalHidlTargetTestBase::getService<IAuthSecret>(
- AuthSecretHidlEnvironment::Instance()->getServiceName<IAuthSecret>());
+ authsecret = IAuthSecret::getService(GetParam());
ASSERT_NE(authsecret, nullptr);
// All tests must enroll the correct secret first as this cannot be changed
@@ -59,18 +45,18 @@
};
/* Provision the primary user with a secret. */
-TEST_F(AuthSecretHidlTest, provisionPrimaryUserCredential) {
+TEST_P(AuthSecretHidlTest, provisionPrimaryUserCredential) {
// Secret provisioned by SetUp()
}
/* Provision the primary user with a secret and pass the secret again. */
-TEST_F(AuthSecretHidlTest, provisionPrimaryUserCredentialAndPassAgain) {
+TEST_P(AuthSecretHidlTest, provisionPrimaryUserCredentialAndPassAgain) {
// Secret provisioned by SetUp()
authsecret->primaryUserCredential(CORRECT_SECRET);
}
/* Provision the primary user with a secret and pass the secret again repeatedly. */
-TEST_F(AuthSecretHidlTest, provisionPrimaryUserCredentialAndPassAgainMultipleTimes) {
+TEST_P(AuthSecretHidlTest, provisionPrimaryUserCredentialAndPassAgainMultipleTimes) {
// Secret provisioned by SetUp()
constexpr int N = 5;
for (int i = 0; i < N; ++i) {
@@ -82,16 +68,12 @@
* should never happen and is a framework bug if it does. As the secret is
* wrong, the HAL implementation may not be able to function correctly but it
* should fail gracefully. */
-TEST_F(AuthSecretHidlTest, provisionPrimaryUserCredentialAndWrongSecret) {
+TEST_P(AuthSecretHidlTest, provisionPrimaryUserCredentialAndWrongSecret) {
// Secret provisioned by SetUp()
authsecret->primaryUserCredential(WRONG_SECRET);
}
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(AuthSecretHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- AuthSecretHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- ALOGI("Test result = %d", status);
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, AuthSecretHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IAuthSecret::descriptor)),
+ android::hardware::PrintInstanceNameToString);
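This converts the test from the deprecated VtsHalHidlTargetTestBase environment to a GoogleTest value-parameterized suite instantiated once per registered HAL instance. The same skeleton, shown here for a hypothetical IFoo@1.0 interface (the header and namespace are illustrative, not part of this change):

    #include <android/hardware/foo/1.0/IFoo.h>  // hypothetical interface
    #include <gtest/gtest.h>
    #include <hidl/GtestPrinter.h>
    #include <hidl/ServiceManagement.h>

    using ::android::sp;
    using ::android::hardware::foo::V1_0::IFoo;  // hypothetical

    class FooHidlTest : public ::testing::TestWithParam<std::string> {
      public:
        void SetUp() override {
            foo = IFoo::getService(GetParam());  // GetParam() is the instance name
            ASSERT_NE(foo, nullptr);
        }
        sp<IFoo> foo;
    };

    TEST_P(FooHidlTest, ServiceIsResponsive) { /* exercise 'foo' here */ }

    INSTANTIATE_TEST_SUITE_P(
            PerInstance, FooHidlTest,
            testing::ValuesIn(android::hardware::getAllHalInstanceNames(IFoo::descriptor)),
            android::hardware::PrintInstanceNameToString);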
diff --git a/automotive/OWNERS b/automotive/OWNERS
index 3cf4489..83ee63c 100644
--- a/automotive/OWNERS
+++ b/automotive/OWNERS
@@ -1,4 +1,5 @@
-randolphs@google.com
pirozzoj@google.com
twasilczyk@google.com
pfg@google.com
+gurunagarajan@google.com
+keunyoung@google.com
diff --git a/automotive/can/1.0/default/Android.bp b/automotive/can/1.0/default/Android.bp
index 0a4afd6..8aa1d6b 100644
--- a/automotive/can/1.0/default/Android.bp
+++ b/automotive/can/1.0/default/Android.bp
@@ -47,7 +47,6 @@
shared_libs: [
"android.hardware.automotive.can@1.0",
"libhidlbase",
- "libhidltransport",
],
static_libs: [
"android.hardware.automotive.can@libnetdevice",
diff --git a/automotive/can/1.0/tools/Android.bp b/automotive/can/1.0/tools/Android.bp
index 8c26985..21f364b 100644
--- a/automotive/can/1.0/tools/Android.bp
+++ b/automotive/can/1.0/tools/Android.bp
@@ -23,7 +23,6 @@
shared_libs: [
"android.hardware.automotive.can@1.0",
"libhidlbase",
- "libhidltransport",
],
header_libs: [
"android.hardware.automotive.can@hidl-utils-lib",
@@ -39,7 +38,6 @@
shared_libs: [
"android.hardware.automotive.can@1.0",
"libhidlbase",
- "libhidltransport",
],
header_libs: [
"android.hardware.automotive.can@hidl-utils-lib",
@@ -55,6 +53,5 @@
shared_libs: [
"android.hardware.automotive.can@1.0",
"libhidlbase",
- "libhidltransport",
],
}
diff --git a/automotive/evs/1.1/Android.bp b/automotive/evs/1.1/Android.bp
index d2e85f1..c850c91 100644
--- a/automotive/evs/1.1/Android.bp
+++ b/automotive/evs/1.1/Android.bp
@@ -10,13 +10,15 @@
"types.hal",
"IEvsCamera.hal",
"IEvsCameraStream.hal",
+ "IEvsEnumerator.hal",
],
interfaces: [
"android.hardware.automotive.evs@1.0",
+ "android.hardware.camera.device@3.2",
"android.hardware.graphics.common@1.0",
"android.hardware.graphics.common@1.1",
"android.hardware.graphics.common@1.2",
"android.hidl.base@1.0",
],
- gen_java: true,
+ gen_java: false,
}
diff --git a/automotive/evs/1.1/IEvsCamera.hal b/automotive/evs/1.1/IEvsCamera.hal
index 21ca79e..975b6c6 100644
--- a/automotive/evs/1.1/IEvsCamera.hal
+++ b/automotive/evs/1.1/IEvsCamera.hal
@@ -26,6 +26,14 @@
*/
interface IEvsCamera extends @1.0::IEvsCamera {
/**
+ * Returns the description of this camera.
+ *
+ * @return info The description of this camera. This must be the same value as
+ * reported by EvsEnumerator::getCameraList_1_1().
+ */
+ getCameraInfo_1_1() generates (CameraDesc info);
+
+ /**
* Requests to pause EVS camera stream events.
*
* Like stopVideoStream(), events may continue to arrive for some time
@@ -100,7 +108,27 @@
unsetMaster() generates (EvsResult result);
/**
- * Requests to set a camera parameter.
+ * Retrieves a list of parameters this camera supports.
+ *
+ * @return params A list of CameraParam that this camera supports.
+ */
+ getParameterList() generates (vec<CameraParam> params);
+
+ /**
+ * Requests a valid value range of a camera parameter.
+ *
+ * @param id The identifier of camera parameter, CameraParam enum.
+ *
+ * @return min The lower bound of valid parameter value range.
+ * @return max The upper bound of valid parameter value range.
+ * @return step The resolution of values in valid range.
+ */
+ getIntParameterRange(CameraParam id)
+ generates (int32_t min, int32_t max, int32_t step);
+
+ /**
+ * Requests to set a camera parameter. Only a request from the master
+ * client will be processed successfully.
*
* @param id The identifier of camera parameter, CameraParam enum.
* value A desired parameter value.
@@ -114,7 +142,7 @@
* from what the client gives if, for example, the
* driver does not support a target parameter.
*/
- setParameter(CameraParam id, int32_t value)
+ setIntParameter(CameraParam id, int32_t value)
generates (EvsResult result, int32_t effectiveValue);
/**
@@ -126,5 +154,5 @@
* not supported.
* value A value of requested camera parameter.
*/
- getParameter(CameraParam id) generates(EvsResult result, int32_t value);
+ getIntParameter(CameraParam id) generates(EvsResult result, int32_t value);
};
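For context, a rough client-side sketch of driving the renamed parameter methods through the generated C++ proxy, matching the int32_t signatures declared above (error handling omitted; it assumes the caller already holds master ownership of an IEvsCamera 1.1 proxy):

    #include <android/hardware/automotive/evs/1.1/IEvsCamera.h>

    using ::android::sp;
    using ::android::hardware::automotive::evs::V1_0::EvsResult;
    using ::android::hardware::automotive::evs::V1_1::CameraParam;
    using ::android::hardware::automotive::evs::V1_1::IEvsCamera;

    // Sketch: move BRIGHTNESS to the middle of its supported range.
    void setMidBrightness(const sp<IEvsCamera>& camera) {
        int32_t min = 0, max = 0;
        camera->getIntParameterRange(CameraParam::BRIGHTNESS,
                [&](int32_t lo, int32_t hi, int32_t /*step*/) { min = lo; max = hi; });

        camera->setIntParameter(CameraParam::BRIGHTNESS, min + (max - min) / 2,
                [](EvsResult result, int32_t effective) {
                    // 'effective' may differ from the requested value.
                    (void)result; (void)effective;
                });
    }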
diff --git a/automotive/evs/1.1/IEvsCameraStream.hal b/automotive/evs/1.1/IEvsCameraStream.hal
index 7c7f832..9e4ea19 100644
--- a/automotive/evs/1.1/IEvsCameraStream.hal
+++ b/automotive/evs/1.1/IEvsCameraStream.hal
@@ -17,15 +17,32 @@
package android.hardware.automotive.evs@1.1;
import @1.0::IEvsCameraStream;
+import @1.1::BufferDesc;
+import @1.1::EvsEvent;
/**
* Implemented on client side to receive asynchronous streaming event deliveries.
*/
interface IEvsCameraStream extends @1.0::IEvsCameraStream {
+
+ /**
+ * Receives calls from the HAL each time a video frame is ready for inspection.
+ * Buffer handles received by this method must be returned via calls to
+ * IEvsCamera::doneWithFrame_1_1(). When the video stream is stopped via a call
+ * to IEvsCamera::stopVideoStream(), this callback may continue to happen for
+ * some time as the pipeline drains. Each frame must still be returned.
+ * When the last frame in the stream has been delivered, STREAM_STOPPED
+ * event must be delivered. No further frame deliveries may happen
+ * thereafter.
+ *
+ * @param buffer a buffer descriptor of a delivered image frame.
+ */
+ oneway deliverFrame_1_1(BufferDesc buffer);
+
/**
* Receives calls from the HAL each time an event happens.
*
* @param event EVS event with possible event information.
*/
- oneway notifyEvent(EvsEvent event);
+ oneway notify(EvsEvent event);
};
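With frame delivery (deliverFrame_1_1) now split from event notification (notify), a 1.1 client implements both entry points plus the inherited 1.0 deliverFrame. A rough receiver sketch written against the interface as declared above:

    #include <android/hardware/automotive/evs/1.1/IEvsCameraStream.h>

    using ::android::hardware::Return;
    using ::android::hardware::Void;
    using ::android::hardware::automotive::evs::V1_1::BufferDesc;
    using ::android::hardware::automotive::evs::V1_1::EvsEvent;
    using ::android::hardware::automotive::evs::V1_1::EvsEventType;
    namespace evs_1_0 = ::android::hardware::automotive::evs::V1_0;

    class StreamReceiver : public ::android::hardware::automotive::evs::V1_1::IEvsCameraStream {
      public:
        // The inherited 1.0 path must still be implemented; unused for 1.1 streams.
        Return<void> deliverFrame(const evs_1_0::BufferDesc& /*buffer*/) override { return Void(); }

        Return<void> deliverFrame_1_1(const BufferDesc& buffer) override {
            // Consume the frame, then return it via IEvsCamera::doneWithFrame_1_1().
            (void)buffer;
            return Void();
        }

        Return<void> notify(const EvsEvent& event) override {
            if (event.aType == EvsEventType::STREAM_STOPPED) {
                // Last frame delivered; the pipeline is fully drained.
            }
            return Void();
        }
    };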
diff --git a/automotive/evs/1.1/IEvsEnumerator.hal b/automotive/evs/1.1/IEvsEnumerator.hal
new file mode 100644
index 0000000..1695821
--- /dev/null
+++ b/automotive/evs/1.1/IEvsEnumerator.hal
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.automotive.evs@1.1;
+
+import IEvsCamera;
+import @1.0::IEvsEnumerator;
+import @1.0::EvsResult;
+import android.hardware.camera.device@3.2::Stream;
+
+/**
+ * Provides the mechanism for EVS camera discovery
+ */
+interface IEvsEnumerator extends @1.0::IEvsEnumerator {
+ /**
+ * Returns a list of all EVS cameras available to the system
+ *
+ * @return cameras A list of cameras available for EVS service.
+ */
+ getCameraList_1_1() generates (vec<CameraDesc> cameras);
+
+ /**
+ * Gets the IEvsCamera associated with a cameraId from a CameraDesc
+ *
+ * Given a camera's unique cameraId from CameraDesc, returns the
+ * IEvsCamera interface associated with the specified camera. When
+ * done using the camera, the caller may release it by calling closeCamera().
+ *
+ * @param cameraId A unique identifier of the camera.
+ * @param streamCfg A stream configuration the client wants to use.
+ * @return evsCamera EvsCamera object associated with a given cameraId.
+ * Returned object would be null if a camera device does
+ * not support a given stream configuration or is already
+ * configured differently by another client.
+ */
+ openCamera_1_1(string cameraId, Stream streamCfg) generates (IEvsCamera evsCamera);
+};
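A matching client-side sketch of the enumerator additions: list the cameras, then open the first one with a caller-supplied stream configuration. It assumes the 1.1 CameraDesc nests the 1.0 description in a 'v1' field, as the default EvsCamera implementation further below suggests; error handling is kept minimal:

    #include <android/hardware/automotive/evs/1.1/IEvsEnumerator.h>

    using ::android::sp;
    using ::android::hardware::automotive::evs::V1_1::IEvsCamera;
    using ::android::hardware::automotive::evs::V1_1::IEvsEnumerator;
    using ::android::hardware::camera::device::V3_2::Stream;

    sp<IEvsCamera> openFirstCamera(const sp<IEvsEnumerator>& enumerator,
                                   const Stream& streamCfg) {
        sp<IEvsCamera> camera;
        enumerator->getCameraList_1_1([&](const auto& cameras) {
            if (cameras.size() > 0) {
                camera = enumerator->openCamera_1_1(cameras[0].v1.cameraId, streamCfg);
            }
        });
        return camera;  // null if no camera was found or the configuration was rejected
    }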
diff --git a/automotive/evs/1.1/default/Android.bp b/automotive/evs/1.1/default/Android.bp
index 411f0ff..41cb426 100644
--- a/automotive/evs/1.1/default/Android.bp
+++ b/automotive/evs/1.1/default/Android.bp
@@ -7,26 +7,41 @@
"service.cpp",
"EvsCamera.cpp",
"EvsEnumerator.cpp",
- "EvsDisplay.cpp"
+ "EvsDisplay.cpp",
+ "ConfigManager.cpp",
+ "ConfigManagerUtil.cpp",
],
init_rc: ["android.hardware.automotive.evs@1.1-service.rc"],
shared_libs: [
"android.hardware.automotive.evs@1.0",
"android.hardware.automotive.evs@1.1",
+ "android.hardware.camera.device@3.2",
"libbase",
"libbinder",
- "libcutils",
+ "liblog",
"libhardware",
"libhidlbase",
- "libhidltransport",
"liblog",
"libui",
"libutils",
+ "libcamera_metadata",
+ "libtinyxml2",
],
cflags: [
"-O0",
"-g",
],
+
+ required: [
+ "evs_default_configuration.xml",
+ ],
+}
+
+prebuilt_etc {
+ name: "evs_default_configuration.xml",
+
+ src: "resources/evs_default_configuration.xml",
+ sub_dir: "automotive/evs",
}
diff --git a/automotive/evs/1.1/default/ConfigManager.cpp b/automotive/evs/1.1/default/ConfigManager.cpp
new file mode 100644
index 0000000..96a2f98
--- /dev/null
+++ b/automotive/evs/1.1/default/ConfigManager.cpp
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sstream>
+#include <fstream>
+#include <thread>
+
+#include <hardware/gralloc.h>
+#include <utils/SystemClock.h>
+#include <android/hardware/camera/device/3.2/ICameraDevice.h>
+
+#include "ConfigManager.h"
+
+using ::android::hardware::camera::device::V3_2::StreamRotation;
+
+
+ConfigManager::~ConfigManager() {
+ /* Nothing to do */
+}
+
+
+void ConfigManager::readCameraInfo(const XMLElement * const aCameraElem) {
+ if (aCameraElem == nullptr) {
+ ALOGW("XML file does not have required camera element");
+ return;
+ }
+
+ const XMLElement *curElem = aCameraElem->FirstChildElement();
+ while (curElem != nullptr) {
+ if (!strcmp(curElem->Name(), "group")) {
+ /* camera group identifier */
+ const char *group_id = curElem->FindAttribute("group_id")->Value();
+
+ /* create CameraGroup */
+ unique_ptr<ConfigManager::CameraGroup> aCameraGroup(new ConfigManager::CameraGroup());
+
+ /* add a camera device to its group */
+ addCameraDevices(curElem->FindAttribute("device_id")->Value(), aCameraGroup);
+
+ /* a list of camera stream configurations */
+ const XMLElement *childElem =
+ curElem->FirstChildElement("caps")->FirstChildElement("stream");
+ while (childElem != nullptr) {
+ /* read 5 attributes */
+ const XMLAttribute *idAttr = childElem->FindAttribute("id");
+ const XMLAttribute *widthAttr = childElem->FindAttribute("width");
+ const XMLAttribute *heightAttr = childElem->FindAttribute("height");
+ const XMLAttribute *fmtAttr = childElem->FindAttribute("format");
+ const XMLAttribute *fpsAttr = childElem->FindAttribute("framerate");
+
+ const int32_t id = stoi(idAttr->Value());
+ int32_t framerate = 0;
+ if (fpsAttr != nullptr) {
+ framerate = stoi(fpsAttr->Value());
+ }
+
+ int32_t pixFormat;
+ if (ConfigManagerUtil::convertToPixelFormat(fmtAttr->Value(),
+ pixFormat)) {
+ RawStreamConfiguration cfg = {
+ id,
+ stoi(widthAttr->Value()),
+ stoi(heightAttr->Value()),
+ pixFormat,
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+ framerate
+ };
+ aCameraGroup->streamConfigurations[id] = cfg;
+ }
+
+ childElem = childElem->NextSiblingElement("stream");
+ }
+
+ /* camera group synchronization */
+ const char *sync = curElem->FindAttribute("synchronized")->Value();
+ aCameraGroup->synchronized =
+ static_cast<bool>(strcmp(sync, "false"));
+
+ /* add a group to hash map */
+ mCameraGroups[group_id] = std::move(aCameraGroup);
+ } else if (!strcmp(curElem->Name(), "device")) {
+ /* camera unique identifier */
+ const char *id = curElem->FindAttribute("id")->Value();
+
+ /* camera mount location */
+ const char *pos = curElem->FindAttribute("position")->Value();
+
+ /* store read camera module information */
+ mCameraInfo[id] = readCameraDeviceInfo(curElem);
+
+ /* assign a camera device to a position group */
+ mCameraPosition[pos].emplace(id);
+ } else {
+ /* ignore other device types */
+ ALOGD("Unknown element %s is ignored", curElem->Name());
+ }
+
+ curElem = curElem->NextSiblingElement();
+ }
+}
+
+
+unique_ptr<ConfigManager::CameraInfo>
+ConfigManager::readCameraDeviceInfo(const XMLElement *aDeviceElem) {
+ if (aDeviceElem == nullptr) {
+ return nullptr;
+ }
+
+ /* create a CameraInfo to be filled */
+ unique_ptr<ConfigManager::CameraInfo> aCamera(new ConfigManager::CameraInfo());
+
+ /* size information to allocate camera_metadata_t */
+ size_t totalEntries = 0;
+ size_t totalDataSize = 0;
+
+ /* read device capabilities */
+ totalEntries +=
+ readCameraCapabilities(aDeviceElem->FirstChildElement("caps"),
+ aCamera,
+ totalDataSize);
+
+
+ /* read camera metadata */
+ totalEntries +=
+ readCameraMetadata(aDeviceElem->FirstChildElement("characteristics"),
+ aCamera,
+ totalDataSize);
+
+ /* construct camera_metadata_t */
+ if (!constructCameraMetadata(aCamera, totalEntries, totalDataSize)) {
+ ALOGW("Either failed to allocate memory or "
+ "allocated memory was not large enough");
+ }
+
+ return aCamera;
+}
+
+
+size_t ConfigManager::readCameraCapabilities(const XMLElement * const aCapElem,
+ unique_ptr<ConfigManager::CameraInfo> &aCamera,
+ size_t &dataSize) {
+ if (aCapElem == nullptr) {
+ return 0;
+ }
+
+ string token;
+ const XMLElement *curElem = nullptr;
+
+ /* a list of supported camera parameters/controls */
+ curElem = aCapElem->FirstChildElement("supported_controls");
+ if (curElem != nullptr) {
+ const XMLElement *ctrlElem = curElem->FirstChildElement("control");
+ while (ctrlElem != nullptr) {
+ const char *nameAttr = ctrlElem->FindAttribute("name")->Value();
+ const int32_t minVal = stoi(ctrlElem->FindAttribute("min")->Value());
+ const int32_t maxVal = stoi(ctrlElem->FindAttribute("max")->Value());
+
+ int32_t stepVal = 1;
+ const XMLAttribute *stepAttr = ctrlElem->FindAttribute("step");
+ if (stepAttr != nullptr) {
+ stepVal = stoi(stepAttr->Value());
+ }
+
+ CameraParam aParam;
+ if (ConfigManagerUtil::convertToEvsCameraParam(nameAttr,
+ aParam)) {
+ aCamera->controls.emplace(
+ aParam,
+ make_tuple(minVal, maxVal, stepVal)
+ );
+ }
+
+ ctrlElem = ctrlElem->NextSiblingElement("control");
+ }
+ }
+
+ /* a list of camera stream configurations */
+ curElem = aCapElem->FirstChildElement("stream");
+ while (curElem != nullptr) {
+ /* read 5 attributes */
+ const XMLAttribute *idAttr = curElem->FindAttribute("id");
+ const XMLAttribute *widthAttr = curElem->FindAttribute("width");
+ const XMLAttribute *heightAttr = curElem->FindAttribute("height");
+ const XMLAttribute *fmtAttr = curElem->FindAttribute("format");
+ const XMLAttribute *fpsAttr = curElem->FindAttribute("framerate");
+
+ const int32_t id = stoi(idAttr->Value());
+ int32_t framerate = 0;
+ if (fpsAttr != nullptr) {
+ framerate = stoi(fpsAttr->Value());
+ }
+
+ int32_t pixFormat;
+ if (ConfigManagerUtil::convertToPixelFormat(fmtAttr->Value(),
+ pixFormat)) {
+ RawStreamConfiguration cfg = {
+ id,
+ stoi(widthAttr->Value()),
+ stoi(heightAttr->Value()),
+ pixFormat,
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+ framerate
+ };
+ aCamera->streamConfigurations[id] = cfg;
+ }
+
+ curElem = curElem->NextSiblingElement("stream");
+ }
+
+ dataSize = calculate_camera_metadata_entry_data_size(
+ get_camera_metadata_tag_type(
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
+ ),
+ aCamera->streamConfigurations.size() * kStreamCfgSz
+ );
+
+ /* a single camera metadata entry contains multiple stream configurations */
+ return dataSize > 0 ? 1 : 0;
+}
+
+
+size_t ConfigManager::readCameraMetadata(const XMLElement * const aParamElem,
+ unique_ptr<ConfigManager::CameraInfo> &aCamera,
+ size_t &dataSize) {
+ if (aParamElem == nullptr) {
+ return 0;
+ }
+
+ const XMLElement *curElem = aParamElem->FirstChildElement("parameter");
+ size_t numEntries = 0;
+ camera_metadata_tag_t tag;
+ while (curElem != nullptr) {
+ if (!ConfigManagerUtil::convertToMetadataTag(curElem->FindAttribute("name")->Value(),
+ tag)) {
+ switch(tag) {
+ case ANDROID_LENS_DISTORTION:
+ case ANDROID_LENS_POSE_ROTATION:
+ case ANDROID_LENS_POSE_TRANSLATION:
+ case ANDROID_LENS_INTRINSIC_CALIBRATION: {
+ /* float[] */
+ size_t count = 0;
+ void *data = ConfigManagerUtil::convertFloatArray(
+ curElem->FindAttribute("size")->Value(),
+ curElem->FindAttribute("value")->Value(),
+ count
+ );
+
+ aCamera->cameraMetadata[tag] =
+ make_pair(make_unique<void *>(data), count);
+
+ ++numEntries;
+ dataSize += calculate_camera_metadata_entry_data_size(
+ get_camera_metadata_tag_type(tag), count
+ );
+
+ break;
+ }
+
+ default:
+ ALOGW("Parameter %s is not supported",
+ curElem->FindAttribute("name")->Value());
+ break;
+ }
+ }
+
+ curElem = curElem->NextSiblingElement("parameter");
+ }
+
+ return numEntries;
+}
+
+
+bool ConfigManager::constructCameraMetadata(unique_ptr<CameraInfo> &aCamera,
+ const size_t totalEntries,
+ const size_t totalDataSize) {
+ if (!aCamera->allocate(totalEntries, totalDataSize)) {
+ ALOGE("Failed to allocate memory for camera metadata");
+ return false;
+ }
+
+ const size_t numStreamConfigs = aCamera->streamConfigurations.size();
+ unique_ptr<int32_t[]> data(new int32_t[kStreamCfgSz * numStreamConfigs]);
+ int32_t *ptr = data.get();
+ for (auto &cfg : aCamera->streamConfigurations) {
+ for (auto i = 0; i < kStreamCfgSz; ++i) {
+ *ptr++ = cfg.second[i];
+ }
+ }
+ int32_t err = add_camera_metadata_entry(aCamera->characteristics,
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ data.get(),
+ numStreamConfigs * kStreamCfgSz);
+
+ if (err) {
+ ALOGE("Failed to add stream configurations to metadata, ignored");
+ return false;
+ }
+
+ bool success = true;
+ for (auto &[tag, entry] : aCamera->cameraMetadata) {
+ /* try to add new camera metadata entry */
+ int32_t err = add_camera_metadata_entry(aCamera->characteristics,
+ tag,
+ entry.first.get(),
+ entry.second);
+ if (err) {
+ ALOGE("Failed to add an entry with a tag 0x%X", tag);
+
+ /* may exceed preallocated capacity */
+ ALOGE("Camera metadata has %ld / %ld entries and %ld / %ld bytes are filled",
+ (long)get_camera_metadata_entry_count(aCamera->characteristics),
+ (long)get_camera_metadata_entry_capacity(aCamera->characteristics),
+ (long)get_camera_metadata_data_count(aCamera->characteristics),
+ (long)get_camera_metadata_data_capacity(aCamera->characteristics));
+ ALOGE("\tCurrent metadata entry requires %ld bytes",
+ (long)calculate_camera_metadata_entry_data_size(tag, entry.second));
+
+ success = false;
+ }
+ }
+
+ ALOGV("Camera metadata has %ld / %ld entries and %ld / %ld bytes are filled",
+ (long)get_camera_metadata_entry_count(aCamera->characteristics),
+ (long)get_camera_metadata_entry_capacity(aCamera->characteristics),
+ (long)get_camera_metadata_data_count(aCamera->characteristics),
+ (long)get_camera_metadata_data_capacity(aCamera->characteristics));
+
+ return success;
+}
+
+
+void ConfigManager::readSystemInfo(const XMLElement * const aSysElem) {
+ if (aSysElem == nullptr) {
+ return;
+ }
+
+ /*
+ * Please note that this function assumes that a given system XML element
+ * and its child elements follow the DTD. If they do not, this will cause a
+ * segmentation fault because expected attributes cannot be found.
+ */
+
+ /* read number of cameras available in the system */
+ const XMLElement *xmlElem = aSysElem->FirstChildElement("num_cameras");
+ if (xmlElem != nullptr) {
+ mSystemInfo.numCameras =
+ stoi(xmlElem->FindAttribute("value")->Value());
+ }
+}
+
+
+void ConfigManager::readDisplayInfo(const XMLElement * const aDisplayElem) {
+ if (aDisplayElem == nullptr) {
+ ALOGW("XML file does not have required camera element");
+ return;
+ }
+
+ const XMLElement *curDev = aDisplayElem->FirstChildElement("device");
+ while (curDev != nullptr) {
+ const char *id = curDev->FindAttribute("id")->Value();
+ //const char *pos = curDev->FirstAttribute("position")->Value();
+
+ unique_ptr<DisplayInfo> dpy(new DisplayInfo());
+ if (dpy == nullptr) {
+ ALOGE("Failed to allocate memory for DisplayInfo");
+ return;
+ }
+
+ const XMLElement *cap = curDev->FirstChildElement("caps");
+ if (cap != nullptr) {
+ const XMLElement *curStream = cap->FirstChildElement("stream");
+ while (curStream != nullptr) {
+ /* read 4 attributes */
+ const XMLAttribute *idAttr = curStream->FindAttribute("id");
+ const XMLAttribute *widthAttr = curStream->FindAttribute("width");
+ const XMLAttribute *heightAttr = curStream->FindAttribute("height");
+ const XMLAttribute *fmtAttr = curStream->FindAttribute("format");
+
+ const int32_t id = stoi(idAttr->Value());
+ int32_t pixFormat;
+ if (ConfigManagerUtil::convertToPixelFormat(fmtAttr->Value(),
+ pixFormat)) {
+ RawStreamConfiguration cfg = {
+ id,
+ stoi(widthAttr->Value()),
+ stoi(heightAttr->Value()),
+ pixFormat,
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT,
+ 0 // unused
+ };
+ dpy->streamConfigurations[id] = cfg;
+ }
+
+ curStream = curStream->NextSiblingElement("stream");
+ }
+ }
+
+ mDisplayInfo[id] = std::move(dpy);
+ curDev = curDev->NextSiblingElement("device");
+ }
+
+ return;
+}
+
+
+bool ConfigManager::readConfigDataFromXML() noexcept {
+ XMLDocument xmlDoc;
+
+ const int64_t parsingStart = android::elapsedRealtimeNano();
+
+ /* load and parse a configuration file */
+ xmlDoc.LoadFile(mConfigFilePath);
+ if (xmlDoc.ErrorID() != XML_SUCCESS) {
+ ALOGE("Failed to load and/or parse a configuration file, %s", xmlDoc.ErrorStr());
+ return false;
+ }
+
+ /* retrieve the root element */
+ const XMLElement *rootElem = xmlDoc.RootElement();
+ if (strcmp(rootElem->Name(), "configuration")) {
+ ALOGE("A configuration file is not in the required format. "
+ "See /etc/automotive/evs/evs_configuration.dtd");
+ return false;
+ }
+
+ /*
+ * parse camera information; this needs to be done before reading system
+ * information
+ */
+ readCameraInfo(rootElem->FirstChildElement("camera"));
+
+ /* parse system information */
+ readSystemInfo(rootElem->FirstChildElement("system"));
+
+ /* parse display information */
+ readDisplayInfo(rootElem->FirstChildElement("display"));
+
+ const int64_t parsingEnd = android::elapsedRealtimeNano();
+ ALOGI("Parsing configuration file takes %lf (ms)",
+ (double)(parsingEnd - parsingStart) / 1000000.0);
+
+
+ return true;
+}
+
+
+void ConfigManager::addCameraDevices(const char *devices,
+ unique_ptr<CameraGroup> &aGroup) {
+ stringstream device_list(devices);
+ string token;
+ while (getline(device_list, token, ',')) {
+ aGroup->devices.emplace(token);
+ }
+}
+
+
+std::unique_ptr<ConfigManager> ConfigManager::Create(const char *path) {
+ unique_ptr<ConfigManager> cfgMgr(new ConfigManager(path));
+
+ /*
+ * Read a configuration from XML file
+ *
+ * If this is too slow, ConfigManager::readConfigDataFromBinary() and
+ * ConfigManager::writeConfigDataToBinary() can serialize CameraInfo objects
+ * to the filesystem and construct CameraInfo instead; this was
+ * evaluated as 10x faster.
+ */
+ if (!cfgMgr->readConfigDataFromXML()) {
+ return nullptr;
+ } else {
+ return cfgMgr;
+ }
+}
+
diff --git a/automotive/evs/1.1/default/ConfigManager.h b/automotive/evs/1.1/default/ConfigManager.h
new file mode 100644
index 0000000..0275f90
--- /dev/null
+++ b/automotive/evs/1.1/default/ConfigManager.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef CONFIG_MANAGER_H
+#define CONFIG_MANAGER_H
+
+#include <vector>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+
+#include <tinyxml2.h>
+
+#include <system/camera_metadata.h>
+#include <log/log.h>
+#include <android/hardware/automotive/evs/1.1/types.h>
+
+#include "ConfigManagerUtil.h"
+
+using namespace std;
+using namespace tinyxml2;
+
+using ::android::hardware::hidl_vec;
+using ::android::hardware::camera::device::V3_2::Stream;
+using ::android::hardware::automotive::evs::V1_1::CameraParam;
+
+/*
+ * Please note that this is different from what is defined in
+ * libhardware/modules/camera/3_4/metadata/types.h; this has one additional
+ * field to store a framerate.
+ */
+const size_t kStreamCfgSz = 6;
+typedef std::array<int32_t, kStreamCfgSz> RawStreamConfiguration;
+
+class ConfigManager {
+public:
+ static std::unique_ptr<ConfigManager> Create(const char *path = "");
+ ConfigManager(const ConfigManager&) = delete;
+ ConfigManager& operator=(const ConfigManager&) = delete;
+
+ virtual ~ConfigManager();
+
+ /* Camera device's capabilities and metadata */
+ class CameraInfo {
+ public:
+ CameraInfo() :
+ characteristics(nullptr) {
+ /* Nothing to do */
+ }
+
+ virtual ~CameraInfo() {
+ free_camera_metadata(characteristics);
+ }
+
+ /* Allocate memory for camera_metadata_t */
+ bool allocate(size_t entry_cap, size_t data_cap) {
+ if (characteristics != nullptr) {
+ ALOGE("Camera metadata is already allocated");
+ return false;
+ }
+
+ characteristics = allocate_camera_metadata(entry_cap, data_cap);
+ return characteristics != nullptr;
+ }
+
+ /*
+ * List of supported controls that the master client can program.
+ * Parameters are stored with their valid ranges.
+ */
+ unordered_map<CameraParam,
+ tuple<int32_t, int32_t, int32_t>> controls;
+
+ /* List of supported frame rates */
+ unordered_set<int32_t> frameRates;
+
+ /*
+ * List of supported output stream configurations; each array stores
+ * format, width, height, and direction values in that order.
+ */
+ unordered_map<int32_t, RawStreamConfiguration> streamConfigurations;
+
+ /*
+ * Internal storage for camera metadata. Each entry holds a pointer to
+ * data and number of elements
+ */
+ unordered_map<camera_metadata_tag_t,
+ pair<unique_ptr<void *>, size_t>> cameraMetadata;
+
+ /* Camera module characteristics */
+ camera_metadata_t *characteristics;
+ };
+
+ class CameraGroup {
+ public:
+ CameraGroup() {}
+
+ /* ID of member camera devices */
+ unordered_set<string> devices;
+
+ /* The capture operation of member camera devices are synchronized */
+ bool synchronized = false;
+
+ /*
+ * List of stream configurations that are supported by all camera devices
+ * in this group.
+ */
+ unordered_map<int32_t, RawStreamConfiguration> streamConfigurations;
+ };
+
+ class SystemInfo {
+ public:
+ /* number of available cameras */
+ int32_t numCameras = 0;
+ };
+
+ class DisplayInfo {
+ public:
+ /*
+ * List of supported input stream configurations; each array stores
+ * format, width, height, and direction values in that order.
+ */
+ unordered_map<int32_t, RawStreamConfiguration> streamConfigurations;
+ };
+
+ /*
+ * Return system information
+ *
+ * @return SystemInfo
+ * Constant reference of SystemInfo.
+ */
+ const SystemInfo &getSystemInfo() {
+ return mSystemInfo;
+ }
+
+ /*
+ * Return a list of cameras
+ *
+ * This function assumes that it is not being called frequently.
+ *
+ * @return vector<string>
+ * A vector that contains unique camera device identifiers.
+ */
+ vector<string> getCameraList() {
+ vector<string> aList;
+ for (auto &v : mCameraInfo) {
+ aList.emplace_back(v.first);
+ }
+
+ return aList;
+ }
+
+
+ /*
+ * Return a camera group
+ *
+ * @return CameraGroup
+ * A pointer to a camera group identified by a given id.
+ */
+ unique_ptr<CameraGroup>& getCameraGroup(const string& gid) {
+ return mCameraGroups[gid];
+ }
+
+
+ /*
+ * Return camera device information
+ *
+ * @param cameraId
+ * Unique camera node identifier in string
+ *
+ * @return unique_ptr<CameraInfo>
+ * A pointer to CameraInfo that is associated with a given camera
+ * ID. This returns a null pointer if this does not recognize a
+ * given camera identifier.
+ */
+ unique_ptr<CameraInfo>& getCameraInfo(const string cameraId) noexcept {
+ return mCameraInfo[cameraId];
+ }
+
+private:
+ /* Constructors */
+ ConfigManager(const char *xmlPath) :
+ mConfigFilePath(xmlPath) {
+ }
+
+ /* System configuration */
+ SystemInfo mSystemInfo;
+
+ /* Internal data structure for camera device information */
+ unordered_map<string, unique_ptr<CameraInfo>> mCameraInfo;
+
+ /* Internal data structure for camera device information */
+ unordered_map<string, unique_ptr<DisplayInfo>> mDisplayInfo;
+
+ /* Camera groups are stored in a <group id, CameraGroup> hash map */
+ unordered_map<string, unique_ptr<CameraGroup>> mCameraGroups;
+
+ /*
+ * Camera positions are stored in <position, camera id set> hash map.
+ * The position must be one of front, rear, left, and right.
+ */
+ unordered_map<string, unordered_set<string>> mCameraPosition;
+
+ /* A path to XML configuration file */
+ const char *mConfigFilePath;
+
+ /*
+ * Parse a given EVS configuration file and store the information
+ * internally.
+ *
+ * @return bool
+ * True if it completes parsing a file successfully.
+ */
+ bool readConfigDataFromXML() noexcept;
+
+ /*
+ * read the system information
+ *
+ * @param aSysElem
+ * A pointer to "system" XML element.
+ */
+ void readSystemInfo(const XMLElement * const aSysElem);
+
+ /*
+ * read the information of camera devices
+ *
+ * @param aCameraElem
+ * A pointer to "camera" XML element that may contain multiple
+ * "device" elements.
+ */
+ void readCameraInfo(const XMLElement * const aCameraElem);
+
+ /*
+ * read display device information
+ *
+ * @param aDisplayElem
+ * A pointer to "display" XML element that may contain multiple
+ * "device" elements.
+ */
+ void readDisplayInfo(const XMLElement * const aDisplayElem);
+
+ /*
+ * read camera device information
+ *
+ * @param aDeviceElem
+ * A pointer to "device" XML element that contains camera module
+ * capability info and its characteristics.
+ *
+ * @return unique_ptr<CameraInfo>
+ * A pointer to CameraInfo class that contains camera module
+ * capability and characteristics. Please note that this transfers
+ * the ownership of created CameraInfo to the caller.
+ */
+ unique_ptr<CameraInfo> readCameraDeviceInfo(const XMLElement *aDeviceElem);
+
+ /*
+ * read camera device capabilities
+ *
+ * @param aCapElem
+ * A pointer to the "caps" XML element.
+ * @param aCamera
+ * A pointer to CameraInfo that is being filled by this method.
+ * @param dataSize
+ * Required size of memory to store camera metadata found in this
+ * method. This is calculated in this method and returned to the
+ * caller for camera_metadata allocation.
+ *
+ * @return size_t
+ * Number of camera metadata entries
+ */
+ size_t readCameraCapabilities(const XMLElement * const aCapElem,
+ unique_ptr<CameraInfo> &aCamera,
+ size_t &dataSize);
+
+ /*
+ * read camera metadata
+ *
+ * @param aParamElem
+ * A pointer to "characteristics" XML element.
+ * @param aCamera
+ * A pointer to CameraInfo that is being filled by this method.
+ * @param dataSize
+ * Required size of memory to store camera metadata found in this
+ * method.
+ *
+ * @return size_t
+ * Number of camera metadata entries
+ */
+ size_t readCameraMetadata(const XMLElement * const aParamElem,
+ unique_ptr<CameraInfo> &aCamera,
+ size_t &dataSize);
+
+ /*
+ * construct camera_metadata_t from camera capabilities and metadata
+ *
+ * @param aCamera
+ * A pointer to CameraInfo that is being filled by this method.
+ * @param totalEntries
+ * Number of camera metadata entries to be added.
+ * @param totalDataSize
+ * Sum of sizes of camera metadata entries to be added.
+ *
+ * @return bool
+ * False if either it fails to allocate memory for camera metadata
+ * or its size is not large enough to add all found camera metadata
+ * entries.
+ */
+ bool constructCameraMetadata(unique_ptr<CameraInfo> &aCamera,
+ const size_t totalEntries,
+ const size_t totalDataSize);
+
+ /*
+ * parse a comma-separated list of camera devices and add them to
+ * CameraGroup.
+ *
+ * @param devices
+ * A comma-separated list of camera device identifiers.
+ * @param aGroup
+ * Camera group which cameras will be added to.
+ */
+ void addCameraDevices(const char *devices,
+ unique_ptr<CameraGroup> &aGroup);
+};
+#endif // CONFIG_MANAGER_H
+
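The enumerator consumes this class at service start-up; a minimal usage sketch of its public interface (the configuration path below is illustrative, not mandated by this change):

    #define LOG_TAG "EvsConfigDump"
    #include <log/log.h>

    #include "ConfigManager.h"

    // Sketch: load the EVS configuration and report what it describes.
    void dumpConfiguredCameras() {
        std::unique_ptr<ConfigManager> config =
                ConfigManager::Create("/vendor/etc/automotive/evs/evs_default_configuration.xml");
        if (config == nullptr) {
            ALOGE("Failed to load or parse the EVS configuration");
            return;
        }

        ALOGI("System reports %d camera(s)", config->getSystemInfo().numCameras);
        for (const auto& id : config->getCameraList()) {
            auto& info = config->getCameraInfo(id);
            ALOGI("Camera %s exposes %zu stream configuration(s)",
                  id.c_str(), info ? info->streamConfigurations.size() : (size_t)0);
        }
    }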
diff --git a/automotive/evs/1.1/default/ConfigManagerUtil.cpp b/automotive/evs/1.1/default/ConfigManagerUtil.cpp
new file mode 100644
index 0000000..8206daa
--- /dev/null
+++ b/automotive/evs/1.1/default/ConfigManagerUtil.cpp
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ConfigManagerUtil.h"
+
+#include <string>
+#include <sstream>
+#include <linux/videodev2.h>
+
+#include <log/log.h>
+#include <system/graphics-base-v1.0.h>
+
+
+bool ConfigManagerUtil::convertToEvsCameraParam(const string &id,
+ CameraParam &camParam) {
+ string trimmed = ConfigManagerUtil::trimString(id);
+ bool success = true;
+
+ if (!trimmed.compare("BRIGHTNESS")) {
+ camParam = CameraParam::BRIGHTNESS;
+ } else if (!trimmed.compare("CONTRAST")) {
+ camParam = CameraParam::CONTRAST;
+ } else if (!trimmed.compare("AUTOGAIN")) {
+ camParam = CameraParam::AUTOGAIN;
+ } else if (!trimmed.compare("GAIN")) {
+ camParam = CameraParam::GAIN;
+ } else if (!trimmed.compare("AUTO_WHITE_BALANCE")) {
+ camParam = CameraParam::AUTO_WHITE_BALANCE;
+ } else if (!trimmed.compare("WHITE_BALANCE_TEMPERATURE")) {
+ camParam = CameraParam::WHITE_BALANCE_TEMPERATURE;
+ } else if (!trimmed.compare("SHARPNESS")) {
+ camParam = CameraParam::SHARPNESS;
+ } else if (!trimmed.compare("AUTO_EXPOSURE")) {
+ camParam = CameraParam::AUTO_EXPOSURE;
+ } else if (!trimmed.compare("ABSOLUTE_EXPOSURE")) {
+ camParam = CameraParam::ABSOLUTE_EXPOSURE;
+ } else if (!trimmed.compare("ABSOLUTE_FOCUS")) {
+ camParam = CameraParam::ABSOLUTE_FOCUS;
+ } else if (!trimmed.compare("AUTO_FOCUS")) {
+ camParam = CameraParam::AUTO_FOCUS;
+ } else if (!trimmed.compare("ABSOLUTE_ZOOM")) {
+ camParam = CameraParam::ABSOLUTE_ZOOM;
+ } else {
+ success = false;
+ }
+
+ return success;
+}
+
+
+bool ConfigManagerUtil::convertToPixelFormat(const string &format,
+ int32_t &pixFormat) {
+ string trimmed = ConfigManagerUtil::trimString(format);
+ bool success = true;
+
+ if (!trimmed.compare("RGBA_8888")) {
+ pixFormat = HAL_PIXEL_FORMAT_RGBA_8888;
+ } else if (!trimmed.compare("YCRCB_420_SP")) {
+ pixFormat = HAL_PIXEL_FORMAT_YCRCB_420_SP;
+ } else if (!trimmed.compare("YCBCR_422_I")) {
+ pixFormat = HAL_PIXEL_FORMAT_YCBCR_422_I;
+ } else {
+ success = false;
+ }
+
+ return success;
+}
+
+
+bool ConfigManagerUtil::convertToMetadataTag(const char *name,
+ camera_metadata_tag &aTag) {
+ if (!strcmp(name, "LENS_DISTORTION")) {
+ aTag = ANDROID_LENS_DISTORTION;
+ } else if (!strcmp(name, "LENS_INTRINSIC_CALIBRATION")) {
+ aTag = ANDROID_LENS_INTRINSIC_CALIBRATION;
+ } else if (!strcmp(name, "LENS_POSE_ROTATION")) {
+ aTag = ANDROID_LENS_POSE_ROTATION;
+ } else if (!strcmp(name, "LENS_POSE_TRANSLATION")) {
+ aTag = ANDROID_LENS_POSE_TRANSLATION;
+ } else {
+ return false;
+ }
+
+ return true;
+}
+
+
+float *ConfigManagerUtil::convertFloatArray(const char *sz, const char *vals,
+ size_t &count, const char delimiter) {
+ string size_string(sz);
+ string value_string(vals);
+
+ count = stoi(size_string);
+ float *result = new float[count];
+ stringstream values(value_string);
+
+ int32_t idx = 0;
+ string token;
+ while (getline(values, token, delimiter)) {
+ result[idx++] = stof(token);
+ }
+
+ return result;
+}
+
+
+string ConfigManagerUtil::trimString(const string &src, const string &ws) {
+ const auto s = src.find_first_not_of(ws);
+ if (s == string::npos) {
+ return "";
+ }
+
+ const auto e = src.find_last_not_of(ws);
+ const auto r = e - s + 1;
+
+ return src.substr(s, r);
+}
+
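These helpers are exercised by ConfigManager while walking the XML; a short standalone sketch of the conversion utilities (the attribute values are made up for illustration):

    #include <memory>

    #include "ConfigManagerUtil.h"

    // Sketch: parse a "size"/"value" attribute pair the way readCameraMetadata() does.
    size_t parseCalibrationExample() {
        size_t count = 0;
        // convertFloatArray returns a heap-allocated array of 'count' floats; the caller owns it.
        std::unique_ptr<float[]> values(
                ConfigManagerUtil::convertFloatArray("5", "0.0,1.0,2.0,3.0,4.0", count));

        CameraParam param;
        if (ConfigManagerUtil::convertToEvsCameraParam(" BRIGHTNESS ", param)) {
            // Whitespace is trimmed before matching, so the lookup above succeeds.
        }
        return count;  // 5
    }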
diff --git a/automotive/evs/1.1/default/ConfigManagerUtil.h b/automotive/evs/1.1/default/ConfigManagerUtil.h
new file mode 100644
index 0000000..8c89ae7
--- /dev/null
+++ b/automotive/evs/1.1/default/ConfigManagerUtil.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef CONFIG_MANAGER_UTIL_H
+#define CONFIG_MANAGER_UTIL_H
+
+#include <utility>
+#include <string>
+#include <system/camera_metadata.h>
+#include <android/hardware/automotive/evs/1.1/types.h>
+
+using namespace std;
+using ::android::hardware::automotive::evs::V1_1::CameraParam;
+
+
+class ConfigManagerUtil {
+public:
+ /**
+ * Convert a given string into a CameraParam enum value
+ */
+ static bool convertToEvsCameraParam(const string &id,
+ CameraParam &camParam);
+ /**
+ * Convert a given string into android.hardware.graphics.common.PixelFormat
+ */
+ static bool convertToPixelFormat(const string &format,
+ int32_t &pixelFormat);
+ /**
+ * Convert a given string into the corresponding camera metadata tag defined in
+ * system/media/camera/include/system/camera_metadata_tags.h
+ */
+ static bool convertToMetadataTag(const char *name,
+ camera_metadata_tag &aTag);
+ /**
+ * Convert a given string into a floating value array
+ */
+ static float *convertFloatArray(const char *sz,
+ const char *vals,
+ size_t &count,
+ const char delimiter = ',');
+ /**
+ * Trim a string
+ */
+ static string trimString(const string &src,
+ const string &ws = " \n\r\t\f\v");
+};
+
+#endif // CONFIG_MANAGER_UTIL_H
+
diff --git a/automotive/evs/1.1/default/EvsCamera.cpp b/automotive/evs/1.1/default/EvsCamera.cpp
index 2d55566..5ba753d 100644
--- a/automotive/evs/1.1/default/EvsCamera.cpp
+++ b/automotive/evs/1.1/default/EvsCamera.cpp
@@ -40,28 +40,21 @@
const unsigned MAX_BUFFERS_IN_FLIGHT = 100;
-EvsCamera::EvsCamera(const char *id) :
+EvsCamera::EvsCamera(const char *id,
+ unique_ptr<ConfigManager::CameraInfo> &camInfo) :
mFramesAllowed(0),
mFramesInUse(0),
- mStreamState(STOPPED) {
+ mStreamState(STOPPED),
+ mCameraInfo(camInfo) {
ALOGD("EvsCamera instantiated");
- mDescription.cameraId = id;
+ /* set a camera id */
+ mDescription.v1.cameraId = id;
- // Set up dummy data for testing
- if (mDescription.cameraId == kCameraName_Backup) {
- mWidth = 640; // full NTSC/VGA
- mHeight = 480; // full NTSC/VGA
- mDescription.vendorFlags = 0xFFFFFFFF; // Arbitrary value
- } else {
- mWidth = 320; // 1/2 NTSC/VGA
- mHeight = 240; // 1/2 NTSC/VGA
- }
-
- mFormat = HAL_PIXEL_FORMAT_RGBA_8888;
- mUsage = GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_CAMERA_WRITE |
- GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_RARELY;
+ /* set camera metadata */
+ mDescription.metadata.setToExternal((uint8_t *)camInfo->characteristics,
+ get_camera_metadata_size(camInfo->characteristics));
}
@@ -109,7 +102,7 @@
ALOGD("getCameraInfo");
// Send back our self description
- _hidl_cb(mDescription);
+ _hidl_cb(mDescription.v1);
return Void();
}
@@ -194,7 +187,7 @@
// Block outside the mutex until the "stop" flag has been acknowledged
// We won't send any more frames, but the client might still get some already in flight
- ALOGD("Waiting for stream thread to end..");
+ ALOGD("Waiting for stream thread to end...");
lock.unlock();
mCaptureThread.join();
lock.lock();
@@ -238,6 +231,15 @@
// Methods from ::android::hardware::automotive::evs::V1_1::IEvsCamera follow.
+Return<void> EvsCamera::getCameraInfo_1_1(getCameraInfo_1_1_cb _hidl_cb) {
+ ALOGD("getCameraInfo_1_1");
+
+ // Send back our self description
+ _hidl_cb(mDescription);
+ return Void();
+}
+
+
Return<EvsResult> EvsCamera::doneWithFrame_1_1(const BufferDesc_1_1& bufDesc) {
std::lock_guard <std::mutex> lock(mAccessLock);
returnBuffer(bufDesc.bufferId, bufDesc.buffer.nativeHandle);
@@ -278,8 +280,29 @@
}
-Return<void> EvsCamera::setParameter(CameraParam id, int32_t value,
- setParameter_cb _hidl_cb) {
+Return<void> EvsCamera::getParameterList(getParameterList_cb _hidl_cb) {
+ hidl_vec<CameraParam> hidlCtrls;
+ hidlCtrls.resize(mCameraInfo->controls.size());
+ unsigned idx = 0;
+ for (auto& [cid, cfg] : mCameraInfo->controls) {
+ hidlCtrls[idx++] = cid;
+ }
+
+ _hidl_cb(hidlCtrls);
+ return Void();
+}
+
+
+Return<void> EvsCamera::getIntParameterRange(CameraParam id,
+ getIntParameterRange_cb _hidl_cb) {
+ auto range = mCameraInfo->controls[id];
+ _hidl_cb(get<0>(range), get<1>(range), get<2>(range));
+ return Void();
+}
+
+
+Return<void> EvsCamera::setIntParameter(CameraParam id, int32_t value,
+ setIntParameter_cb _hidl_cb) {
// Default implementation does not support this.
(void)id;
(void)value;
@@ -288,7 +311,8 @@
}
-Return<void> EvsCamera::getParameter(CameraParam id, getParameter_cb _hidl_cb) {
+Return<void> EvsCamera::getIntParameter(CameraParam id,
+ getIntParameter_cb _hidl_cb) {
// Default implementation does not support this.
(void)id;
_hidl_cb(EvsResult::INVALID_ARG, 0);
@@ -471,9 +495,7 @@
fillTestFrame(newBuffer);
// Issue the (asynchronous) callback to the client -- can't be holding the lock
- EvsEvent event;
- event.buffer(newBuffer);
- auto result = mStream->notifyEvent(event);
+ auto result = mStream->deliverFrame_1_1(newBuffer);
if (result.isOk()) {
ALOGD("Delivered %p as id %d",
newBuffer.buffer.nativeHandle.getNativeHandle(), newBuffer.bufferId);
@@ -506,10 +528,8 @@
// If we've been asked to stop, send an event to signal the actual end of stream
EvsEvent event;
- InfoEventDesc desc = {};
- desc.aType = InfoEventType::STREAM_STOPPED;
- event.info(desc);
- auto result = mStream->notifyEvent(event);
+ event.aType = EvsEventType::STREAM_STOPPED;
+ auto result = mStream->notify(event);
if (!result.isOk()) {
ALOGE("Error delivering end of stream marker");
}
@@ -616,6 +636,38 @@
}
+sp<EvsCamera> EvsCamera::Create(const char *deviceName) {
+ unique_ptr<ConfigManager::CameraInfo> nullCamInfo = nullptr;
+
+ return Create(deviceName, nullCamInfo);
+}
+
+
+sp<EvsCamera> EvsCamera::Create(const char *deviceName,
+ unique_ptr<ConfigManager::CameraInfo> &camInfo,
+ const Stream *streamCfg) {
+ sp<EvsCamera> evsCamera = new EvsCamera(deviceName, camInfo);
+ if (evsCamera == nullptr) {
+ return nullptr;
+ }
+
+ /* default implementation does not use a given configuration */
+ (void)streamCfg;
+
+ /* Use the first resolution from the list for the testing */
+ auto it = camInfo->streamConfigurations.begin();
+ evsCamera->mWidth = it->second[1];
+ evsCamera->mHeight = it->second[2];
+ evsCamera->mDescription.v1.vendorFlags = 0xFFFFFFFF; // Arbitrary test value
+
+ evsCamera->mFormat = HAL_PIXEL_FORMAT_RGBA_8888;
+ evsCamera->mUsage = GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_CAMERA_WRITE |
+ GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_RARELY;
+
+ return evsCamera;
+}
+
+
} // namespace implementation
} // namespace V1_0
} // namespace evs
diff --git a/automotive/evs/1.1/default/EvsCamera.h b/automotive/evs/1.1/default/EvsCamera.h
index 47a3164..c15b4b1 100644
--- a/automotive/evs/1.1/default/EvsCamera.h
+++ b/automotive/evs/1.1/default/EvsCamera.h
@@ -25,6 +25,8 @@
#include <thread>
+#include "ConfigManager.h"
+
using BufferDesc_1_0 = ::android::hardware::automotive::evs::V1_0::BufferDesc;
using BufferDesc_1_1 = ::android::hardware::automotive::evs::V1_1::BufferDesc;
using IEvsCameraStream_1_0 = ::android::hardware::automotive::evs::V1_0::IEvsCameraStream;
@@ -59,18 +61,28 @@
Return<EvsResult> setExtendedInfo(uint32_t opaqueIdentifier, int32_t opaqueValue) override;
// Methods from ::android::hardware::automotive::evs::V1_1::IEvsCamera follow.
+ Return<void> getCameraInfo_1_1(getCameraInfo_1_1_cb _hidl_cb) override;
Return<EvsResult> pauseVideoStream() override;
Return<EvsResult> resumeVideoStream() override;
Return<EvsResult> doneWithFrame_1_1(const BufferDesc_1_1& buffer) override;
Return<EvsResult> setMaster() override;
Return<EvsResult> forceMaster(const sp<IEvsDisplay>& display) override;
Return<EvsResult> unsetMaster() override;
- Return<void> setParameter(CameraParam id, int32_t value,
- setParameter_cb _hidl_cb) override;
- Return<void> getParameter(CameraParam id, getParameter_cb _hidl_cb) override;
+ Return<void> getParameterList(getParameterList_cb _hidl_cb) override;
+ Return<void> getIntParameterRange(CameraParam id,
+ getIntParameterRange_cb _hidl_cb) override;
+ Return<void> setIntParameter(CameraParam id, int32_t value,
+ setIntParameter_cb _hidl_cb) override;
+ Return<void> getIntParameter(CameraParam id,
+ getIntParameter_cb _hidl_cb) override;
- // Implementation details
- EvsCamera(const char *id);
+ static sp<EvsCamera> Create(const char *deviceName);
+ static sp<EvsCamera> Create(const char *deviceName,
+ unique_ptr<ConfigManager::CameraInfo> &camInfo,
+ const Stream *streamCfg = nullptr);
+ EvsCamera(const EvsCamera&) = delete;
+ EvsCamera& operator=(const EvsCamera&) = delete;
+
virtual ~EvsCamera() override;
void forceShutdown(); // This gets called if another caller "steals" ownership of the camera
@@ -79,7 +91,10 @@
static const char kCameraName_Backup[];
private:
+ EvsCamera(const char *id,
+ unique_ptr<ConfigManager::CameraInfo> &camInfo);
// These three functions are expected to be called while mAccessLock is held
+ //
bool setAvailableFrames_Locked(unsigned bufferCount);
unsigned increaseAvailableFrames_Locked(unsigned numToAdd);
unsigned decreaseAvailableFrames_Locked(unsigned numToRemove);
@@ -124,6 +139,9 @@
// Synchronization necessary to deconflict mCaptureThread from the main service thread
std::mutex mAccessLock;
+
+ // Static camera module information
+ unique_ptr<ConfigManager::CameraInfo> &mCameraInfo;
};
} // namespace implementation
diff --git a/automotive/evs/1.1/default/EvsEnumerator.cpp b/automotive/evs/1.1/default/EvsEnumerator.cpp
index b324907..a010729 100644
--- a/automotive/evs/1.1/default/EvsEnumerator.cpp
+++ b/automotive/evs/1.1/default/EvsEnumerator.cpp
@@ -33,6 +33,7 @@
// constructs a new instance for each client.
std::list<EvsEnumerator::CameraRecord> EvsEnumerator::sCameraList;
wp<EvsDisplay> EvsEnumerator::sActiveDisplay;
+unique_ptr<ConfigManager> EvsEnumerator::sConfigManager;
EvsEnumerator::EvsEnumerator() {
@@ -40,9 +41,11 @@
// Add sample camera data to our list of cameras
// In a real driver, this would be expected to scan the available hardware
- sCameraList.emplace_back(EvsCamera::kCameraName_Backup);
- sCameraList.emplace_back("LaneView");
- sCameraList.emplace_back("right turn");
+ sConfigManager =
+ ConfigManager::Create("/etc/automotive/evs/evs_sample_configuration.xml");
+ for (auto v : sConfigManager->getCameraList()) {
+ sCameraList.emplace_back(v.c_str());
+ }
}
@@ -57,7 +60,7 @@
std::vector<CameraDesc_1_0> descriptions;
descriptions.reserve(numCameras);
for (const auto& cam : sCameraList) {
- descriptions.push_back( cam.desc );
+ descriptions.push_back( cam.desc.v1 );
}
// Encapsulate our camera descriptions in the HIDL vec type
@@ -78,7 +81,7 @@
// Find the named camera
CameraRecord *pRecord = nullptr;
for (auto &&cam : sCameraList) {
- if (cam.desc.cameraId == cameraId) {
+ if (cam.desc.v1.cameraId == cameraId) {
// Found a match!
pRecord = &cam;
break;
@@ -99,7 +102,12 @@
}
// Construct a camera instance for the caller
- pActiveCamera = new EvsCamera(cameraId.c_str());
+ if (sConfigManager == nullptr) {
+ pActiveCamera = EvsCamera::Create(cameraId.c_str());
+ } else {
+ pActiveCamera = EvsCamera::Create(cameraId.c_str(),
+ sConfigManager->getCameraInfo(cameraId));
+ }
pRecord->activeInstance = pActiveCamera;
if (pActiveCamera == nullptr) {
ALOGE("Failed to allocate new EvsCamera object for %s\n", cameraId.c_str());
@@ -120,15 +128,15 @@
// Get the camera id so we can find it in our list
std::string cameraId;
- pCamera_1_1->getCameraInfo([&cameraId](CameraDesc desc) {
- cameraId = desc.cameraId;
+ pCamera_1_1->getCameraInfo_1_1([&cameraId](CameraDesc desc) {
+ cameraId = desc.v1.cameraId;
}
);
// Find the named camera
CameraRecord *pRecord = nullptr;
for (auto &&cam : sCameraList) {
- if (cam.desc.cameraId == cameraId) {
+ if (cam.desc.v1.cameraId == cameraId) {
// Found a match!
pRecord = &cam;
break;
@@ -209,6 +217,89 @@
}
+// Methods from ::android::hardware::automotive::evs::V1_1::IEvsEnumerator follow.
+Return<void> EvsEnumerator::getCameraList_1_1(getCameraList_1_1_cb _hidl_cb) {
+ ALOGD("getCameraList");
+
+ const unsigned numCameras = sCameraList.size();
+
+ // Build up a packed array of CameraDesc for return
+ // NOTE: Only has to live until the callback returns
+ std::vector<CameraDesc_1_1> descriptions;
+ descriptions.reserve(numCameras);
+ for (const auto& cam : sCameraList) {
+ descriptions.push_back( cam.desc );
+ }
+
+ // Encapsulate our camera descriptions in the HIDL vec type
+ hidl_vec<CameraDesc_1_1> hidlCameras(descriptions);
+
+ // Send back the results
+ ALOGD("reporting %zu cameras available", hidlCameras.size());
+ _hidl_cb(hidlCameras);
+
+ // HIDL convention says we return Void if we sent our result back via callback
+ return Void();
+}
+
+Return<sp<IEvsCamera_1_1>>
+EvsEnumerator::openCamera_1_1(const hidl_string& cameraId,
+ const Stream& streamCfg) {
+ // Find the named camera
+ CameraRecord *pRecord = nullptr;
+ for (auto &&cam : sCameraList) {
+ if (cam.desc.v1.cameraId == cameraId) {
+ // Found a match!
+ pRecord = &cam;
+ break;
+ }
+ }
+
+ // Is this a recognized camera id?
+ if (!pRecord) {
+ ALOGE("Requested camera %s not found", cameraId.c_str());
+ return nullptr;
+ }
+
+ // Has this camera already been instantiated by another caller?
+ sp<EvsCamera> pActiveCamera = pRecord->activeInstance.promote();
+ if (pActiveCamera != nullptr) {
+ ALOGW("Killing previous camera because of new caller");
+ closeCamera(pActiveCamera);
+ }
+
+ // Construct a camera instance for the caller
+ if (sConfigManager == nullptr) {
+ pActiveCamera = EvsCamera::Create(cameraId.c_str());
+ } else {
+ pActiveCamera = EvsCamera::Create(cameraId.c_str(),
+ sConfigManager->getCameraInfo(cameraId),
+ &streamCfg);
+ }
+
+ pRecord->activeInstance = pActiveCamera;
+ if (pActiveCamera == nullptr) {
+ ALOGE("Failed to allocate new EvsCamera object for %s\n", cameraId.c_str());
+ }
+
+ return pActiveCamera;
+}
+
+
+EvsEnumerator::CameraRecord* EvsEnumerator::findCameraById(const std::string& cameraId) {
+ // Find the named camera
+ CameraRecord *pRecord = nullptr;
+ for (auto &&cam : sCameraList) {
+ if (cam.desc.v1.cameraId == cameraId) {
+ // Found a match!
+ pRecord = &cam;
+ break;
+ }
+ }
+
+ return pRecord;
+}
+
} // namespace implementation
} // namespace V1_1
} // namespace evs
diff --git a/automotive/evs/1.1/default/EvsEnumerator.h b/automotive/evs/1.1/default/EvsEnumerator.h
index 11c2170..475ec76 100644
--- a/automotive/evs/1.1/default/EvsEnumerator.h
+++ b/automotive/evs/1.1/default/EvsEnumerator.h
@@ -17,18 +17,20 @@
#ifndef ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSCAMERAENUMERATOR_H
#define ANDROID_HARDWARE_AUTOMOTIVE_EVS_V1_1_EVSCAMERAENUMERATOR_H
-#include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>
+#include <android/hardware/automotive/evs/1.1/IEvsEnumerator.h>
#include <android/hardware/automotive/evs/1.1/IEvsCamera.h>
#include <list>
+#include "ConfigManager.h"
+
using ::android::hardware::automotive::evs::V1_0::EvsResult;
using ::android::hardware::automotive::evs::V1_0::IEvsDisplay;
using ::android::hardware::automotive::evs::V1_0::DisplayState;
-using ::android::hardware::automotive::evs::V1_0::IEvsEnumerator;
using IEvsCamera_1_0 = ::android::hardware::automotive::evs::V1_0::IEvsCamera;
using IEvsCamera_1_1 = ::android::hardware::automotive::evs::V1_1::IEvsCamera;
using CameraDesc_1_0 = ::android::hardware::automotive::evs::V1_0::CameraDesc;
+using CameraDesc_1_1 = ::android::hardware::automotive::evs::V1_1::CameraDesc;
namespace android {
@@ -53,6 +55,11 @@
Return<void> closeDisplay(const ::android::sp<IEvsDisplay>& display) override;
Return<DisplayState> getDisplayState() override;
+ // Methods from ::android::hardware::automotive::evs::V1_1::IEvsEnumerator follow.
+ Return<void> getCameraList_1_1(getCameraList_1_1_cb _hidl_cb) override;
+ Return<sp<IEvsCamera_1_1>> openCamera_1_1(const hidl_string& cameraId,
+ const Stream& streamCfg) override;
+
// Implementation details
EvsEnumerator();
@@ -61,14 +68,20 @@
// That is to say, this is effectively a singleton despite the fact that HIDL
// constructs a new instance for each client.
struct CameraRecord {
- CameraDesc_1_0 desc;
+ CameraDesc_1_1 desc;
wp<EvsCamera> activeInstance;
- CameraRecord(const char *cameraId) : desc() { desc.cameraId = cameraId; }
+ CameraRecord(const char *cameraId) : desc() { desc.v1.cameraId = cameraId; }
};
- static std::list<CameraRecord> sCameraList;
- static wp<EvsDisplay> sActiveDisplay; // Weak pointer. Object destructs if client dies.
+ static CameraRecord* findCameraById(const std::string& cameraId);
+
+ static std::list<CameraRecord> sCameraList;
+
+ // Weak pointer. Object destructs if client dies.
+ static wp<EvsDisplay> sActiveDisplay;
+
+ static unique_ptr<ConfigManager> sConfigManager;
};
} // namespace implementation
diff --git a/automotive/evs/1.1/default/resources/evs_default_configuration.xml b/automotive/evs/1.1/default/resources/evs_default_configuration.xml
new file mode 100644
index 0000000..692102e
--- /dev/null
+++ b/automotive/evs/1.1/default/resources/evs_default_configuration.xml
@@ -0,0 +1,68 @@
+<?xml version='1.0' encoding='utf-8'?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Exterior View System Example Configuration
+
+ Android Automotive axes are used to define coordinates.
+ See https://source.android.com/devices/sensors/sensor-types#auto_axes
+
+ Use evs_configuration.dtd with the xmllint tool to validate this XML configuration file.
+-->
+
+<configuration>
+ <!-- system configuration -->
+ <system>
+ <!-- number of cameras available to EVS -->
+ <num_cameras value='1'/>
+ </system>
+
+ <!-- camera device information -->
+ <camera>
+ <!-- camera device starts -->
+ <device id='/dev/video1' position='rear'>
+ <caps>
+ <!-- list of supported controls -->
+ <supported_controls>
+ <control name='BRIGHTNESS' min='0' max='255'/>
+ <control name='CONTRAST' min='0' max='255'/>
+ </supported_controls>
+
+ <stream id='0' width='640' height='360' format='RGBA_8888' framerate='30'/>
+ </caps>
+
+ <!-- list of parameters -->
+ <characteristics>
+ <!-- Camera intrinsic calibration matrix. See
+ https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#LENS_INTRINSIC_CALIBRATION
+ -->
+ <parameter
+ name='LENS_INTRINSIC_CALIBRATION'
+ type='float'
+ size='5'
+ value='0.0,0.0,0.0,0.0,0.0'
+ />
+ </characteristics>
+ </device>
+ </camera>
+ <display>
+ <device id='display0' position='driver'>
+ <caps>
+ <!-- list of supported input stream configurations -->
+ <stream id='0' width='1280' height='720' format='RGBA_8888' framerate='30'/>
+ </caps>
+ </device>
+ </display>
+</configuration>
+
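ConfigManager itself is not included in this diff, but EvsEnumerator and EvsCamera above rely on it to turn this XML into per-camera control ranges and stream configurations. Below is a minimal sketch of how the <supported_controls> block could be parsed, assuming tinyxml2 and keying the result by the control name string; the real implementation presumably maps names onto CameraParam and fills ConfigManager::CameraInfo instead.

#include <cstdint>
#include <string>
#include <tuple>
#include <unordered_map>
#include <tinyxml2.h>

// Parses <control name=... min=... max=.../> entries into (min, max, step)
// tuples.  The sample XML above declares no step attribute, so 1 is assumed.
bool parseSupportedControls(
        const char* path,
        std::unordered_map<std::string, std::tuple<int32_t, int32_t, int32_t>>* out) {
    tinyxml2::XMLDocument doc;
    if (doc.LoadFile(path) != tinyxml2::XML_SUCCESS) {
        return false;
    }
    // Error handling elided: the sketch assumes the layout of the sample file above.
    const auto* caps = doc.RootElement()                     // <configuration>
                           ->FirstChildElement("camera")
                           ->FirstChildElement("device")
                           ->FirstChildElement("caps");
    for (const auto* ctrl = caps->FirstChildElement("supported_controls")
                                ->FirstChildElement("control");
         ctrl != nullptr; ctrl = ctrl->NextSiblingElement("control")) {
        (*out)[ctrl->Attribute("name")] =
            std::make_tuple(ctrl->IntAttribute("min"), ctrl->IntAttribute("max"), 1);
    }
    return true;
}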
diff --git a/automotive/evs/1.1/default/service.cpp b/automotive/evs/1.1/default/service.cpp
index 128a14a..5135864 100644
--- a/automotive/evs/1.1/default/service.cpp
+++ b/automotive/evs/1.1/default/service.cpp
@@ -33,7 +33,7 @@
using android::hardware::joinRpcThreadpool;
// Generated HIDL files
-using android::hardware::automotive::evs::V1_0::IEvsEnumerator;
+using android::hardware::automotive::evs::V1_1::IEvsEnumerator;
using android::hardware::automotive::evs::V1_0::IEvsDisplay;
// The namespace in which all our implementation code lives
diff --git a/automotive/evs/1.1/types.hal b/automotive/evs/1.1/types.hal
index 2c6b2ed..dcb2abb 100644
--- a/automotive/evs/1.1/types.hal
+++ b/automotive/evs/1.1/types.hal
@@ -21,6 +21,22 @@
import @1.0::DisplayState;
import @1.0::EvsResult;
import android.hardware.graphics.common@1.2::HardwareBuffer;
+import android.hardware.camera.device@3.2::CameraMetadata;
+
+/**
+ * Structure describing the basic properties of an EVS camera, extended from its
+ * v1.0 declaration.
+ *
+ * The HAL is responsible for filling out this structure for each
+ * EVS camera in the system.
+ */
+struct CameraDesc {
+ @1.0::CameraDesc v1;
+ /**
+ * Store camera metadata such as lens characteristics.
+ */
+ CameraMetadata metadata;
+};
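The new metadata field is a standard camera_metadata blob, so clients can query it with the usual helpers from system/camera_metadata.h, exactly as the updated VTS test does for ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS further down. A sketch of pulling the lens calibration declared in the sample XML above (whether the default HAL populates this tag is not established by this change):

#include <algorithm>
#include <array>
#include <system/camera_metadata.h>

// Returns true and fills 'out' if the HAL advertised LENS_INTRINSIC_CALIBRATION.
bool readIntrinsics(const CameraDesc& desc, std::array<float, 5>* out) {
    if (desc.metadata.size() == 0) {
        return false;  // no metadata attached
    }
    const auto* meta =
        reinterpret_cast<const camera_metadata_t*>(desc.metadata.data());
    camera_metadata_ro_entry_t entry;
    if (find_camera_metadata_ro_entry(meta, ANDROID_LENS_INTRINSIC_CALIBRATION,
                                      &entry) != 0 ||
        entry.count < out->size()) {
        return false;  // tag not present or unexpectedly short
    }
    std::copy_n(entry.data.f, out->size(), out->begin());
    return true;
}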
/**
* Structure representing an image buffer through our APIs
@@ -50,7 +66,7 @@
/**
* Types of informative streaming events
*/
-enum InfoEventType : uint32_t {
+enum EvsEventType : uint32_t {
/**
* Video stream is started
*/
@@ -81,11 +97,11 @@
/**
* Structure that describes informative events that occur while EVS is streaming
*/
-struct InfoEventDesc {
+struct EvsEvent {
/**
* Type of an informative event
*/
- InfoEventType aType;
+ EvsEventType aType;
/**
* Possible additional information
*/
@@ -93,20 +109,6 @@
};
/**
- * EVS event definition
- */
-safe_union EvsEvent {
- /**
- * A buffer descriptor of an image frame
- */
- BufferDesc buffer;
- /**
- * General streaming events
- */
- InfoEventDesc info;
-};
-
-/**
* EVS Camera Parameter
*/
enum CameraParam : uint32_t {
@@ -127,14 +129,6 @@
*/
GAIN,
/**
- * Mirror the image horizontally
- */
- HFLIP,
- /**
- * Mirror the image vertically
- */
- VFLIP,
- /**
* Automatic Whitebalance
*/
AUTO_WHITE_BALANCE,
@@ -156,11 +150,6 @@
*/
ABSOLUTE_EXPOSURE,
/**
- * When AEC is running in either auto or aperture priority, this parameter
- * sets whether a frame rate varies.
- */
- AUTO_EXPOSURE_PRIORITY,
- /**
* Set the focal point of the camera to the specified position. This
* parameter may not be effective when auto focus is enabled.
*/
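With the EvsEvent safe_union removed, frames and events now travel on separate IEvsCameraStream 1.1 callbacks: deliverFrame_1_1() for buffers and notify() for the flattened event struct. A sketch of how a camera implementation might announce a parameter change, assuming the payload convention the VTS FrameHandler below relies on (parameter id in payload[0], effective value in payload[1]) and the IEvsCameraStream_1_1 alias style used in EvsCamera.h:

// Sketch of a HAL-side helper; 'stream' is the registered 1.1 camera stream.
void notifyParameterChanged(const sp<IEvsCameraStream_1_1>& stream,
                            CameraParam id, int32_t effectiveValue) {
    EvsEvent event = {};
    event.aType = EvsEventType::PARAMETER_CHANGED;
    event.payload[0] = static_cast<uint32_t>(id);
    event.payload[1] = static_cast<uint32_t>(effectiveValue);
    if (!stream->notify(event).isOk()) {
        ALOGE("Failed to deliver PARAMETER_CHANGED for parameter 0x%X",
              static_cast<unsigned>(id));
    }
}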
diff --git a/automotive/evs/1.1/vts/functional/Android.bp b/automotive/evs/1.1/vts/functional/Android.bp
index 55c50a4..4753933 100644
--- a/automotive/evs/1.1/vts/functional/Android.bp
+++ b/automotive/evs/1.1/vts/functional/Android.bp
@@ -23,6 +23,7 @@
defaults: ["VtsHalTargetTestDefaults"],
shared_libs: [
"libui",
+ "libcamera_metadata",
],
static_libs: [
"android.hardware.automotive.evs@1.0",
@@ -31,6 +32,7 @@
"android.hardware.graphics.common@1.0",
"android.hardware.graphics.common@1.1",
"android.hardware.graphics.common@1.2",
+ "android.hardware.camera.device@3.2",
],
test_suites: ["general-tests"],
cflags: [
diff --git a/automotive/evs/1.1/vts/functional/FrameHandler.cpp b/automotive/evs/1.1/vts/functional/FrameHandler.cpp
index 1627689..6d53652 100644
--- a/automotive/evs/1.1/vts/functional/FrameHandler.cpp
+++ b/automotive/evs/1.1/vts/functional/FrameHandler.cpp
@@ -138,93 +138,93 @@
}
-Return<void> FrameHandler::notifyEvent(const EvsEvent& event) {
- // Local flag we use to keep track of when the stream is stopping
- auto type = event.getDiscriminator();
- if (type == EvsEvent::hidl_discriminator::info) {
- mLock.lock();
- mLatestEventDesc = event.info();
- if (mLatestEventDesc.aType == InfoEventType::STREAM_STOPPED) {
- // Signal that the last frame has been received and the stream is stopped
- mRunning = false;
- } else if (mLatestEventDesc.aType == InfoEventType::PARAMETER_CHANGED) {
- ALOGD("Camera parameter 0x%X is changed to 0x%X",
- mLatestEventDesc.payload[0], mLatestEventDesc.payload[1]);
+Return<void> FrameHandler::deliverFrame_1_1(const BufferDesc_1_1& bufDesc) {
+ const AHardwareBuffer_Desc* pDesc =
+ reinterpret_cast<const AHardwareBuffer_Desc *>(&bufDesc.buffer.description);
+ ALOGD("Received a frame from the camera (%p)",
+ bufDesc.buffer.nativeHandle.getNativeHandle());
+
+ // Store the dimensions of the received frame.
+ mFrameWidth = pDesc->width;
+ mFrameHeight = pDesc->height;
+
+ // If we were given an opened display at construction time, then send the received
+ // image on to that display.
+ if (mDisplay.get()) {
+ // Get the output buffer we'll use to display the imagery
+ BufferDesc_1_0 tgtBuffer = {};
+ mDisplay->getTargetBuffer([&tgtBuffer](const BufferDesc_1_0& buff) {
+ tgtBuffer = buff;
+ }
+ );
+
+ if (tgtBuffer.memHandle == nullptr) {
+ printf("Didn't get target buffer - frame lost\n");
+ ALOGE("Didn't get requested output buffer -- skipping this frame.");
} else {
- ALOGD("Received an event %s", eventToString(mLatestEventDesc.aType));
- }
- mLock.unlock();
- mEventSignal.notify_all();
- } else {
- auto bufDesc = event.buffer();
- const AHardwareBuffer_Desc* pDesc =
- reinterpret_cast<const AHardwareBuffer_Desc *>(&bufDesc.buffer.description);
- ALOGD("Received a frame from the camera (%p)",
- bufDesc.buffer.nativeHandle.getNativeHandle());
+ // Copy the contents of the source buffer into tgtBuffer
+ copyBufferContents(tgtBuffer, bufDesc);
- // Store a dimension of a received frame.
- mFrameWidth = pDesc->width;
- mFrameHeight = pDesc->height;
-
- // If we were given an opened display at construction time, then send the received
- // image back down the camera.
- if (mDisplay.get()) {
- // Get the output buffer we'll use to display the imagery
- BufferDesc_1_0 tgtBuffer = {};
- mDisplay->getTargetBuffer([&tgtBuffer](const BufferDesc_1_0& buff) {
- tgtBuffer = buff;
- }
- );
-
- if (tgtBuffer.memHandle == nullptr) {
- printf("Didn't get target buffer - frame lost\n");
- ALOGE("Didn't get requested output buffer -- skipping this frame.");
+ // Send the target buffer back for display
+ Return<EvsResult> result = mDisplay->returnTargetBufferForDisplay(tgtBuffer);
+ if (!result.isOk()) {
+ printf("HIDL error on display buffer (%s)- frame lost\n",
+ result.description().c_str());
+ ALOGE("Error making the remote function call. HIDL said %s",
+ result.description().c_str());
+ } else if (result != EvsResult::OK) {
+ printf("Display reported error - frame lost\n");
+ ALOGE("We encountered error %d when returning a buffer to the display!",
+ (EvsResult) result);
} else {
- // Copy the contents of the of buffer.memHandle into tgtBuffer
- copyBufferContents(tgtBuffer, bufDesc);
-
- // Send the target buffer back for display
- Return<EvsResult> result = mDisplay->returnTargetBufferForDisplay(tgtBuffer);
- if (!result.isOk()) {
- printf("HIDL error on display buffer (%s)- frame lost\n",
- result.description().c_str());
- ALOGE("Error making the remote function call. HIDL said %s",
- result.description().c_str());
- } else if (result != EvsResult::OK) {
- printf("Display reported error - frame lost\n");
- ALOGE("We encountered error %d when returning a buffer to the display!",
- (EvsResult) result);
- } else {
- // Everything looks good!
- // Keep track so tests or watch dogs can monitor progress
- mLock.lock();
- mFramesDisplayed++;
- mLock.unlock();
- }
+ // Everything looks good!
+ // Keep track so tests or watch dogs can monitor progress
+ mLock.lock();
+ mFramesDisplayed++;
+ mLock.unlock();
}
}
-
-
- switch (mReturnMode) {
- case eAutoReturn:
- // Send the camera buffer back now that the client has seen it
- ALOGD("Calling doneWithFrame");
- // TODO: Why is it that we get a HIDL crash if we pass back the cloned buffer?
- mCamera->doneWithFrame_1_1(bufDesc);
- break;
- case eNoAutoReturn:
- // Hang onto the buffer handle for now -- the client will return it explicitly later
- mHeldBuffers.push(bufDesc);
- }
-
- mLock.lock();
- ++mFramesReceived;
- mLock.unlock();
- mFrameSignal.notify_all();
-
- ALOGD("Frame handling complete");
}
+
+ switch (mReturnMode) {
+ case eAutoReturn:
+ // Send the camera buffer back now that the client has seen it
+ ALOGD("Calling doneWithFrame");
+ mCamera->doneWithFrame_1_1(bufDesc);
+ break;
+ case eNoAutoReturn:
+ // Hang onto the buffer handle for now -- the client will return it explicitly later
+ mHeldBuffers.push(bufDesc);
+ }
+
+ mLock.lock();
+ ++mFramesReceived;
+ mLock.unlock();
+ mFrameSignal.notify_all();
+
+ ALOGD("Frame handling complete");
+
+ return Void();
+}
+
+
+Return<void> FrameHandler::notify(const EvsEvent& event) {
+ // Local flag we use to keep track of when the stream is stopping
+ mLock.lock();
+ mLatestEventDesc = event;
+ if (mLatestEventDesc.aType == EvsEventType::STREAM_STOPPED) {
+ // Signal that the last frame has been received and the stream is stopped
+ mRunning = false;
+ } else if (mLatestEventDesc.aType == EvsEventType::PARAMETER_CHANGED) {
+ ALOGD("Camera parameter 0x%X is changed to 0x%X",
+ mLatestEventDesc.payload[0], mLatestEventDesc.payload[1]);
+ } else {
+ ALOGD("Received an event %s", eventToString(mLatestEventDesc.aType));
+ }
+ mLock.unlock();
+ mEventSignal.notify_all();
+
return Void();
}
@@ -342,18 +342,18 @@
}
}
-bool FrameHandler::waitForEvent(const InfoEventType aTargetEvent,
- InfoEventDesc &eventDesc) {
+bool FrameHandler::waitForEvent(const EvsEventType aTargetEvent,
+ EvsEvent &event) {
// Wait until we get an expected parameter change event.
std::unique_lock<std::mutex> lock(mLock);
auto now = std::chrono::system_clock::now();
bool result = mEventSignal.wait_until(lock, now + 5s,
- [this, aTargetEvent, &eventDesc](){
+ [this, aTargetEvent, &event](){
bool flag = mLatestEventDesc.aType == aTargetEvent;
if (flag) {
- eventDesc.aType = mLatestEventDesc.aType;
- eventDesc.payload[0] = mLatestEventDesc.payload[0];
- eventDesc.payload[1] = mLatestEventDesc.payload[1];
+ event.aType = mLatestEventDesc.aType;
+ event.payload[0] = mLatestEventDesc.payload[0];
+ event.payload[1] = mLatestEventDesc.payload[1];
}
return flag;
@@ -363,21 +363,22 @@
return !result;
}
-const char *FrameHandler::eventToString(const InfoEventType aType) {
+const char *FrameHandler::eventToString(const EvsEventType aType) {
switch (aType) {
- case InfoEventType::STREAM_STARTED:
+ case EvsEventType::STREAM_STARTED:
return "STREAM_STARTED";
- case InfoEventType::STREAM_STOPPED:
+ case EvsEventType::STREAM_STOPPED:
return "STREAM_STOPPED";
- case InfoEventType::FRAME_DROPPED:
+ case EvsEventType::FRAME_DROPPED:
return "FRAME_DROPPED";
- case InfoEventType::TIMEOUT:
+ case EvsEventType::TIMEOUT:
return "TIMEOUT";
- case InfoEventType::PARAMETER_CHANGED:
+ case EvsEventType::PARAMETER_CHANGED:
return "PARAMETER_CHANGED";
- case InfoEventType::MASTER_RELEASED:
+ case EvsEventType::MASTER_RELEASED:
return "MASTER_RELEASED";
default:
return "Unknown";
}
}
+
diff --git a/automotive/evs/1.1/vts/functional/FrameHandler.h b/automotive/evs/1.1/vts/functional/FrameHandler.h
index 7f87cb4..e5f1b8f 100644
--- a/automotive/evs/1.1/vts/functional/FrameHandler.h
+++ b/automotive/evs/1.1/vts/functional/FrameHandler.h
@@ -33,7 +33,6 @@
using ::android::sp;
using ::android::hardware::automotive::evs::V1_0::IEvsDisplay;
using ::android::hardware::automotive::evs::V1_0::EvsResult;
-using ::android::hardware::automotive::evs::V1_0::CameraDesc;
using BufferDesc_1_0 = ::android::hardware::automotive::evs::V1_0::BufferDesc;
using BufferDesc_1_1 = ::android::hardware::automotive::evs::V1_1::BufferDesc;
@@ -56,6 +55,13 @@
FrameHandler(android::sp <IEvsCamera> pCamera, CameraDesc cameraInfo,
android::sp <IEvsDisplay> pDisplay = nullptr,
BufferControlFlag mode = eAutoReturn);
+ virtual ~FrameHandler() {
+ if (mCamera != nullptr) {
+ /* shutdown a camera explicitly */
+ shutdown();
+ }
+ }
+
void shutdown();
bool startStream();
@@ -67,19 +73,22 @@
bool isRunning();
void waitForFrameCount(unsigned frameCount);
- bool waitForEvent(const InfoEventType aTargetEvent,
- InfoEventDesc &eventDesc);
+ bool waitForEvent(const EvsEventType aTargetEvent,
+ EvsEvent &eventDesc);
void getFramesCounters(unsigned* received, unsigned* displayed);
void getFrameDimension(unsigned* width, unsigned* height);
private:
- // Implementation for ::android::hardware::automotive::evs::V1_1::IEvsCameraStream
+ // Implementation for ::android::hardware::automotive::evs::V1_0::IEvsCameraStream
Return<void> deliverFrame(const BufferDesc_1_0& buffer) override;
- Return<void> notifyEvent(const EvsEvent& event) override;
+
+ // Implementation for ::android::hardware::automotive::evs::V1_1::IEvsCameraStream
+ Return<void> deliverFrame_1_1(const BufferDesc_1_1& buffer) override;
+ Return<void> notify(const EvsEvent& event) override;
// Local implementation details
bool copyBufferContents(const BufferDesc_1_0& tgtBuffer, const BufferDesc_1_1& srcBuffer);
- const char *eventToString(const InfoEventType aType);
+ const char *eventToString(const EvsEventType aType);
// Values initialized as startup
android::sp <IEvsCamera> mCamera;
@@ -100,7 +109,7 @@
unsigned mFramesDisplayed = 0; // Simple counter -- rolls over eventually!
unsigned mFrameWidth = 0;
unsigned mFrameHeight = 0;
- InfoEventDesc mLatestEventDesc;
+ EvsEvent mLatestEventDesc;
};
diff --git a/automotive/evs/1.1/vts/functional/VtsHalEvsV1_1TargetTest.cpp b/automotive/evs/1.1/vts/functional/VtsHalEvsV1_1TargetTest.cpp
index a6e4881..1d3fd87 100644
--- a/automotive/evs/1.1/vts/functional/VtsHalEvsV1_1TargetTest.cpp
+++ b/automotive/evs/1.1/vts/functional/VtsHalEvsV1_1TargetTest.cpp
@@ -38,8 +38,9 @@
#include "FrameHandler.h"
-#include <stdio.h>
-#include <string.h>
+#include <cstdio>
+#include <cstring>
+#include <cstdlib>
#include <hidl/HidlTransportSupport.h>
#include <hwbinder/ProcessState.h>
@@ -50,8 +51,10 @@
#include <android/log.h>
#include <android/hardware/automotive/evs/1.1/IEvsCamera.h>
#include <android/hardware/automotive/evs/1.1/IEvsCameraStream.h>
-#include <android/hardware/automotive/evs/1.0/IEvsEnumerator.h>
+#include <android/hardware/automotive/evs/1.1/IEvsEnumerator.h>
#include <android/hardware/automotive/evs/1.0/IEvsDisplay.h>
+#include <android/hardware/camera/device/3.2/ICameraDevice.h>
+#include <system/camera_metadata.h>
#include <VtsHalHidlTargetTestBase.h>
#include <VtsHalHidlTargetTestEnvBase.h>
@@ -64,13 +67,28 @@
using ::android::hardware::hidl_handle;
using ::android::hardware::hidl_string;
using ::android::sp;
-using ::android::hardware::automotive::evs::V1_0::CameraDesc;
+using ::android::hardware::camera::device::V3_2::Stream;
using ::android::hardware::automotive::evs::V1_0::DisplayDesc;
using ::android::hardware::automotive::evs::V1_0::DisplayState;
-using ::android::hardware::automotive::evs::V1_0::IEvsEnumerator;
+using ::android::hardware::graphics::common::V1_0::PixelFormat;
using IEvsCamera_1_0 = ::android::hardware::automotive::evs::V1_0::IEvsCamera;
using IEvsCamera_1_1 = ::android::hardware::automotive::evs::V1_1::IEvsCamera;
+/*
+ * Please note that this is different from what is defined in
+ * libhardware/modules/camera/3_4/metadata/types.h; this has one additional
+ * field to store a framerate.
+ */
+const size_t kStreamCfgSz = 5;
+typedef struct {
+ int32_t width;
+ int32_t height;
+ int32_t format;
+ int32_t direction;
+ int32_t framerate;
+} RawStreamConfig;
+
+
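Since the metadata walk below reinterprets raw int32 data through this struct, it only works if kStreamCfgSz and the struct stay in step; a compile-time check (illustrative, not part of the patch) makes that relationship explicit:

// Each entry in ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS is consumed as
// kStreamCfgSz consecutive int32_t values, so the struct must pack to that size.
static_assert(sizeof(RawStreamConfig) == kStreamCfgSz * sizeof(int32_t),
              "RawStreamConfig must cover exactly kStreamCfgSz int32 entries");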
// Test environment for Evs HIDL HAL.
class EvsHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
public:
@@ -107,15 +125,16 @@
assert(pEnumerator != nullptr);
// Get the camera list
- pEnumerator->getCameraList([this](hidl_vec <CameraDesc> cameraList) {
- ALOGI("Camera list callback received %zu cameras",
- cameraList.size());
- cameraInfo.reserve(cameraList.size());
- for (auto&& cam: cameraList) {
- ALOGI("Found camera %s", cam.cameraId.c_str());
- cameraInfo.push_back(cam);
- }
- }
+ pEnumerator->getCameraList_1_1(
+ [this](hidl_vec <CameraDesc> cameraList) {
+ ALOGI("Camera list callback received %zu cameras",
+ cameraList.size());
+ cameraInfo.reserve(cameraList.size());
+ for (auto&& cam: cameraList) {
+ ALOGI("Found camera %s", cam.v1.cameraId.c_str());
+ cameraInfo.push_back(cam);
+ }
+ }
);
// We insist on at least one camera for EVS to pass any camera tests
@@ -143,19 +162,23 @@
// Get the camera list
loadCameraList();
+ // Using a null stream configuration makes EVS use the default resolution and
+ // output format.
+ Stream nullCfg = {};
+
// Open and close each camera twice
for (auto&& cam: cameraInfo) {
for (int pass = 0; pass < 2; pass++) {
sp<IEvsCamera_1_1> pCam =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam, nullptr);
// Verify that this camera self-identifies correctly
- pCam->getCameraInfo([&cam](CameraDesc desc) {
- ALOGD("Found camera %s", desc.cameraId.c_str());
- EXPECT_EQ(cam.cameraId, desc.cameraId);
- }
+ pCam->getCameraInfo_1_1([&cam](CameraDesc desc) {
+ ALOGD("Found camera %s", desc.v1.cameraId.c_str());
+ EXPECT_EQ(cam.v1.cameraId, desc.v1.cameraId);
+ }
);
// Explicitly close the camera so resources are released right away
@@ -177,22 +200,26 @@
// Get the camera list
loadCameraList();
+ // Using a null stream configuration makes EVS use the default resolution and
+ // output format.
+ Stream nullCfg = {};
+
// Open and close each camera twice
for (auto&& cam: cameraInfo) {
sp<IEvsCamera_1_1> pCam =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam, nullptr);
// Verify that this camera self-identifies correctly
- pCam->getCameraInfo([&cam](CameraDesc desc) {
- ALOGD("Found camera %s", desc.cameraId.c_str());
- EXPECT_EQ(cam.cameraId, desc.cameraId);
- }
+ pCam->getCameraInfo_1_1([&cam](CameraDesc desc) {
+ ALOGD("Found camera %s", desc.v1.cameraId.c_str());
+ EXPECT_EQ(cam.v1.cameraId, desc.v1.cameraId);
+ }
);
sp<IEvsCamera_1_1> pCam2 =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam, pCam2);
ASSERT_NE(pCam2, nullptr);
@@ -210,10 +237,10 @@
pEnumerator->closeCamera(pCam);
// Verify that the second camera instance self-identifies correctly
- pCam2->getCameraInfo([&cam](CameraDesc desc) {
- ALOGD("Found camera %s", desc.cameraId.c_str());
- EXPECT_EQ(cam.cameraId, desc.cameraId);
- }
+ pCam2->getCameraInfo_1_1([&cam](CameraDesc desc) {
+ ALOGD("Found camera %s", desc.v1.cameraId.c_str());
+ EXPECT_EQ(cam.v1.cameraId, desc.v1.cameraId);
+ }
);
// Close the second camera instance
@@ -235,10 +262,14 @@
// Get the camera list
loadCameraList();
+ // Using a null stream configuration makes EVS use the default resolution and
+ // output format.
+ Stream nullCfg = {};
+
// Test each reported camera
for (auto&& cam: cameraInfo) {
sp<IEvsCamera_1_1> pCam =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam, nullptr);
@@ -303,11 +334,15 @@
// Get the camera list
loadCameraList();
+ // Using a null stream configuration makes EVS use the default resolution and
+ // output format.
+ Stream nullCfg = {};
+
// Test each reported camera
for (auto&& cam: cameraInfo) {
sp<IEvsCamera_1_1> pCam =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam, nullptr);
@@ -371,6 +406,10 @@
// Get the camera list
loadCameraList();
+ // Using a null stream configuration makes EVS use the default resolution and
+ // output format.
+ Stream nullCfg = {};
+
// Request exclusive access to the EVS display
sp<IEvsDisplay> pDisplay = pEnumerator->openDisplay();
ASSERT_NE(pDisplay, nullptr);
@@ -378,7 +417,7 @@
// Test each reported camera
for (auto&& cam: cameraInfo) {
sp<IEvsCamera_1_1> pCam =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam, nullptr);
@@ -439,16 +478,20 @@
// Get the camera list
loadCameraList();
+ // Using a null stream configuration makes EVS use the default resolution and
+ // output format.
+ Stream nullCfg = {};
+
// Test each reported camera
for (auto&& cam: cameraInfo) {
// Create two camera clients.
sp<IEvsCamera_1_1> pCam0 =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam0, nullptr);
sp<IEvsCamera_1_1> pCam1 =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam1, nullptr);
@@ -486,7 +529,6 @@
nsecs_t runTime = end - firstFrame;
float framesPerSecond0 = framesReceived0 / (runTime * kNanoToSeconds);
float framesPerSecond1 = framesReceived1 / (runTime * kNanoToSeconds);
- printf("Measured camera rate %3.2f fps and %3.2f fps\n", framesPerSecond0, framesPerSecond1);
ALOGI("Measured camera rate %3.2f fps and %3.2f fps", framesPerSecond0, framesPerSecond1);
EXPECT_GE(framesPerSecond0, kMinimumFramesPerSecond);
EXPECT_GE(framesPerSecond1, kMinimumFramesPerSecond);
@@ -526,14 +568,33 @@
// Get the camera list
loadCameraList();
+ // Using a null stream configuration makes EVS use the default resolution and
+ // output format.
+ Stream nullCfg = {};
+
// Test each reported camera
+ Return<EvsResult> result = EvsResult::OK;
for (auto&& cam: cameraInfo) {
// Create a camera client
sp<IEvsCamera_1_1> pCam =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam, nullptr);
+ // Get the parameter list
+ std::vector<CameraParam> cmds;
+ pCam->getParameterList([&cmds](hidl_vec<CameraParam> cmdList) {
+ cmds.reserve(cmdList.size());
+ for (auto &&cmd : cmdList) {
+ cmds.push_back(cmd);
+ }
+ }
+ );
+
+ if (cmds.size() < 1) {
+ continue;
+ }
+
// Set up per-client frame receiver objects which will fire up its own thread
sp<FrameHandler> frameHandler = new FrameHandler(pCam, cam,
nullptr,
@@ -547,83 +608,70 @@
// Ensure the stream starts
frameHandler->waitForFrameCount(1);
- // Try to program few parameters
- EvsResult result = EvsResult::OK;
- int32_t val0 = 100;
- int32_t val1 = 0;
-
result = pCam->setMaster();
- ASSERT_TRUE(result == EvsResult::OK);
+ ASSERT_EQ(EvsResult::OK, result);
- pCam->setParameter(CameraParam::BRIGHTNESS, val0,
- [&result, &val1](auto status, auto effectiveValue) {
- result = status;
- val1 = effectiveValue;
- });
- ASSERT_TRUE(result == EvsResult::OK ||
- result == EvsResult::INVALID_ARG);
+ for (auto &cmd : cmds) {
+ // Get a valid parameter value range
+ int32_t minVal, maxVal, step;
+ pCam->getIntParameterRange(
+ cmd,
+ [&minVal, &maxVal, &step](int32_t val0, int32_t val1, int32_t val2) {
+ minVal = val0;
+ maxVal = val1;
+ step = val2;
+ }
+ );
- if (result == EvsResult::OK) {
- pCam->getParameter(CameraParam::BRIGHTNESS,
+ EvsResult result = EvsResult::OK;
+ if (cmd == CameraParam::ABSOLUTE_FOCUS) {
+ // Try to turn off auto-focus
+ int32_t val1 = 0;
+ pCam->getIntParameter(CameraParam::AUTO_FOCUS,
+ [&result, &val1](auto status, auto value) {
+ result = status;
+ if (status == EvsResult::OK) {
+ val1 = value;
+ }
+ });
+ if (val1 != 0) {
+ pCam->setIntParameter(CameraParam::AUTO_FOCUS, 0,
+ [&result, &val1](auto status, auto effectiveValue) {
+ result = status;
+ val1 = effectiveValue;
+ });
+ ASSERT_EQ(EvsResult::OK, result);
+ ASSERT_EQ(val1, 0);
+ }
+ }
+
+ // Try to program a parameter with a random value [minVal, maxVal]
+ int32_t val0 = minVal + (std::rand() % (maxVal - minVal));
+ int32_t val1 = 0;
+
+ // Rounding down
+ val0 = val0 - (val0 % step);
+ pCam->setIntParameter(cmd, val0,
+ [&result, &val1](auto status, auto effectiveValue) {
+ result = status;
+ val1 = effectiveValue;
+ });
+
+ ASSERT_EQ(EvsResult::OK, result);
+
+ pCam->getIntParameter(cmd,
[&result, &val1](auto status, auto value) {
result = status;
if (status == EvsResult::OK) {
val1 = value;
}
});
- ASSERT_TRUE(result == EvsResult::OK ||
- result == EvsResult::INVALID_ARG);
- ASSERT_EQ(val0, val1) << "Values are not matched.";
- }
-
- val0 = 80;
- val1 = 0;
- pCam->setParameter(CameraParam::CONTRAST, val0,
- [&result, &val1](auto status, auto effectiveValue) {
- result = status;
- val1 = effectiveValue;
- });
- ASSERT_TRUE(result == EvsResult::OK ||
- result == EvsResult::INVALID_ARG);
-
- if (result == EvsResult::OK) {
- pCam->getParameter(CameraParam::CONTRAST,
- [&result, &val1](auto status, auto value) {
- result = status;
- if (status == EvsResult::OK) {
- val1 = value;
- }
- });
- ASSERT_TRUE(result == EvsResult::OK ||
- result == EvsResult::INVALID_ARG);
- ASSERT_EQ(val0, val1) << "Values are not matched.";
- }
-
- val0 = 300;
- val1 = 0;
- pCam->setParameter(CameraParam::ABSOLUTE_ZOOM, val0,
- [&result, &val1](auto status, auto effectiveValue) {
- result = status;
- val1 = effectiveValue;
- });
- ASSERT_TRUE(result == EvsResult::OK ||
- result == EvsResult::INVALID_ARG);
-
- if (result == EvsResult::OK) {
- pCam->getParameter(CameraParam::ABSOLUTE_ZOOM,
- [&result, &val1](auto status, auto value) {
- result = status;
- if (status == EvsResult::OK) {
- val1 = value;
- }
- });
- ASSERT_TRUE(result == EvsResult::OK ||
- result == EvsResult::INVALID_ARG);
+ ASSERT_EQ(EvsResult::OK, result);
ASSERT_EQ(val0, val1) << "Values are not matched.";
}
result = pCam->unsetMaster();
- ASSERT_TRUE(result == EvsResult::OK);
+ ASSERT_EQ(EvsResult::OK, result);
// Shutdown
frameHandler->shutdown();
@@ -650,15 +698,19 @@
// Get the camera list
loadCameraList();
+ // Using a null stream configuration makes EVS use the default resolution and
+ // output format.
+ Stream nullCfg = {};
+
// Test each reported camera
for (auto&& cam: cameraInfo) {
// Create two camera clients.
sp<IEvsCamera_1_1> pCamMaster =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCamMaster, nullptr);
sp<IEvsCamera_1_1> pCamNonMaster =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCamNonMaster, nullptr);
@@ -698,15 +750,15 @@
// Non-master client expects to receive a master role released
// notification.
- InfoEventDesc aNotification = {};
+ EvsEvent aNotification = {};
// Release a master role.
pCamMaster->unsetMaster();
// Verify a change notification.
- frameHandlerNonMaster->waitForEvent(InfoEventType::MASTER_RELEASED, aNotification);
- ASSERT_EQ(InfoEventType::MASTER_RELEASED,
- static_cast<InfoEventType>(aNotification.aType));
+ frameHandlerNonMaster->waitForEvent(EvsEventType::MASTER_RELEASED, aNotification);
+ ASSERT_EQ(EvsEventType::MASTER_RELEASED,
+ static_cast<EvsEventType>(aNotification.aType));
// Non-master becomes a master.
result = pCamNonMaster->setMaster();
@@ -720,9 +772,9 @@
frameHandlerNonMaster->shutdown();
// Verify a change notification.
- frameHandlerMaster->waitForEvent(InfoEventType::MASTER_RELEASED, aNotification);
- ASSERT_EQ(InfoEventType::MASTER_RELEASED,
- static_cast<InfoEventType>(aNotification.aType));
+ frameHandlerMaster->waitForEvent(EvsEventType::MASTER_RELEASED, aNotification);
+ ASSERT_EQ(EvsEventType::MASTER_RELEASED,
+ static_cast<EvsEventType>(aNotification.aType));
// Closing another stream.
frameHandlerMaster->shutdown();
@@ -752,18 +804,46 @@
// Get the camera list
loadCameraList();
+ // Using a null stream configuration makes EVS use the default resolution and
+ // output format.
+ Stream nullCfg = {};
+
// Test each reported camera
for (auto&& cam: cameraInfo) {
// Create two camera clients.
sp<IEvsCamera_1_1> pCamMaster =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCamMaster, nullptr);
sp<IEvsCamera_1_1> pCamNonMaster =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCamNonMaster, nullptr);
+ // Get the parameter list
+ std::vector<CameraParam> camMasterCmds, camNonMasterCmds;
+ pCamMaster->getParameterList([&camMasterCmds](hidl_vec<CameraParam> cmdList) {
+ camMasterCmds.reserve(cmdList.size());
+ for (auto &&cmd : cmdList) {
+ camMasterCmds.push_back(cmd);
+ }
+ }
+ );
+
+ pCamNonMaster->getParameterList([&camNonMasterCmds](hidl_vec<CameraParam> cmdList) {
+ camNonMasterCmds.reserve(cmdList.size());
+ for (auto &&cmd : cmdList) {
+ camNonMasterCmds.push_back(cmd);
+ }
+ }
+ );
+
+ if (camMasterCmds.size() < 1 ||
+ camNonMasterCmds.size() < 1) {
+ // Skip a camera device if it does not support any parameter.
+ continue;
+ }
+
// Set up per-client frame receiver objects which will fire up its own thread
sp<FrameHandler> frameHandlerMaster =
new FrameHandler(pCamMaster, cam,
@@ -778,11 +858,11 @@
// Set one client as the master
EvsResult result = pCamMaster->setMaster();
- ASSERT_TRUE(result == EvsResult::OK);
+ ASSERT_EQ(EvsResult::OK, result);
// Try to set another client as the master.
result = pCamNonMaster->setMaster();
- ASSERT_TRUE(result == EvsResult::OWNERSHIP_LOST);
+ ASSERT_EQ(EvsResult::OWNERSHIP_LOST, result);
// Start the camera's video stream via a master client.
bool startResult = frameHandlerMaster->startStream();
@@ -798,131 +878,168 @@
// Ensure the stream starts
frameHandlerNonMaster->waitForFrameCount(1);
- // Try to program CameraParam::BRIGHTNESS
- int32_t val0 = 100;
+ int32_t val0 = 0;
int32_t val1 = 0;
+ for (auto &cmd : camMasterCmds) {
+ // Get a valid parameter value range
+ int32_t minVal, maxVal, step;
+ pCamMaster->getIntParameterRange(
+ cmd,
+ [&minVal, &maxVal, &step](int32_t val0, int32_t val1, int32_t val2) {
+ minVal = val0;
+ maxVal = val1;
+ step = val2;
+ }
+ );
- pCamMaster->setParameter(CameraParam::BRIGHTNESS, val0,
- [&result, &val1](auto status, auto effectiveValue) {
- result = status;
- val1 = effectiveValue;
- });
- ASSERT_TRUE(result == EvsResult::OK || // Succeeded to program
- result == EvsResult::INVALID_ARG); // Camera parameter is not supported
+ EvsResult result = EvsResult::OK;
+ if (cmd == CameraParam::ABSOLUTE_FOCUS) {
+ // Try to turn off auto-focus
+ int32_t val1 = 1;
+ pCamMaster->setIntParameter(CameraParam::AUTO_FOCUS, 0,
+ [&result, &val1](auto status, auto effectiveValue) {
+ result = status;
+ val1 = effectiveValue;
+ });
+ ASSERT_EQ(EvsResult::OK, result);
+ ASSERT_EQ(val1, 0);
+ }
- // Non-master client expects to receive a parameter change notification
- // whenever a master client adjusts it.
- InfoEventDesc aNotification = {};
+ // Try to program a parameter
+ val0 = minVal + (std::rand() % (maxVal - minVal));
- pCamMaster->getParameter(CameraParam::BRIGHTNESS,
- [&result, &val1](auto status, auto value) {
- result = status;
- if (status == EvsResult::OK) {
- val1 = value;
- }
- });
- ASSERT_TRUE(result == EvsResult::OK || // Succeeded to program
- result == EvsResult::INVALID_ARG); // Camera parameter is not supported
- if (result == EvsResult::OK) {
- ASSERT_EQ(val0, val1) << "Values are not matched.";
+ // Rounding down
+ val0 = val0 - (val0 % step);
+ pCamMaster->setIntParameter(cmd, val0,
+ [&result, &val1](auto status, auto effectiveValue) {
+ result = status;
+ val1 = effectiveValue;
+ });
+ ASSERT_EQ(EvsResult::OK, result);
- // Verify a change notification
- frameHandlerNonMaster->waitForEvent(InfoEventType::PARAMETER_CHANGED, aNotification);
- ASSERT_EQ(InfoEventType::PARAMETER_CHANGED,
- static_cast<InfoEventType>(aNotification.aType));
- ASSERT_EQ(CameraParam::BRIGHTNESS,
- static_cast<CameraParam>(aNotification.payload[0]));
- ASSERT_EQ(val1,
- static_cast<int32_t>(aNotification.payload[1]));
- }
+ // Wait a moment
+ sleep(1);
- // Try to program CameraParam::CONTRAST
- val0 = 80;
- val1 = 0;
- pCamMaster->setParameter(CameraParam::CONTRAST, val0,
- [&result, &val1](auto status, auto effectiveValue) {
- result = status;
- val1 = effectiveValue;
- });
- ASSERT_TRUE(result == EvsResult::OK || // Succeeded to program
- result == EvsResult::INVALID_ARG); // Camera parameter is not supported
+ // Non-master client expects to receive a parameter change notification
+ // whenever a master client adjusts it.
+ EvsEvent aNotification = {};
- if (result == EvsResult::OK) {
- pCamMaster->getParameter(CameraParam::CONTRAST,
+ pCamMaster->getIntParameter(cmd,
[&result, &val1](auto status, auto value) {
result = status;
if (status == EvsResult::OK) {
val1 = value;
}
});
- ASSERT_TRUE(result == EvsResult::OK);
+ ASSERT_EQ(EvsResult::OK, result);
ASSERT_EQ(val0, val1) << "Values are not matched.";
-
// Verify a change notification
- frameHandlerNonMaster->waitForEvent(InfoEventType::PARAMETER_CHANGED, aNotification);
- ASSERT_EQ(InfoEventType::PARAMETER_CHANGED,
- static_cast<InfoEventType>(aNotification.aType));
- ASSERT_EQ(CameraParam::CONTRAST,
+ frameHandlerNonMaster->waitForEvent(EvsEventType::PARAMETER_CHANGED, aNotification);
+ ASSERT_EQ(EvsEventType::PARAMETER_CHANGED,
+ static_cast<EvsEventType>(aNotification.aType));
+ ASSERT_EQ(cmd,
static_cast<CameraParam>(aNotification.payload[0]));
ASSERT_EQ(val1,
static_cast<int32_t>(aNotification.payload[1]));
}
// Try to adjust a parameter via non-master client
- pCamNonMaster->setParameter(CameraParam::CONTRAST, val0,
+ pCamNonMaster->setIntParameter(camNonMasterCmds[0], val0,
[&result, &val1](auto status, auto effectiveValue) {
result = status;
val1 = effectiveValue;
});
- ASSERT_TRUE(result == EvsResult::INVALID_ARG);
+ ASSERT_EQ(EvsResult::INVALID_ARG, result);
// Non-master client attempts to be a master
result = pCamNonMaster->setMaster();
- ASSERT_TRUE(result == EvsResult::OWNERSHIP_LOST);
+ ASSERT_EQ(EvsResult::OWNERSHIP_LOST, result);
// Master client retires from a master role
result = pCamMaster->unsetMaster();
- ASSERT_TRUE(result == EvsResult::OK);
+ ASSERT_EQ(EvsResult::OK, result);
// Try to adjust a parameter after being retired
- pCamMaster->setParameter(CameraParam::BRIGHTNESS, val0,
+ pCamMaster->setIntParameter(camMasterCmds[0], val0,
[&result, &val1](auto status, auto effectiveValue) {
result = status;
val1 = effectiveValue;
});
- ASSERT_TRUE(result == EvsResult::INVALID_ARG);
+ ASSERT_EQ(EvsResult::INVALID_ARG, result);
// Non-master client becomes a master
result = pCamNonMaster->setMaster();
- ASSERT_TRUE(result == EvsResult::OK);
+ ASSERT_EQ(EvsResult::OK, result);
// Try to adjust a parameter via new master client
- pCamNonMaster->setParameter(CameraParam::BRIGHTNESS, val0,
- [&result, &val1](auto status, auto effectiveValue) {
- result = status;
- val1 = effectiveValue;
- });
- ASSERT_TRUE(result == EvsResult::OK || // Succeeded to program
- result == EvsResult::INVALID_ARG); // Camera parameter is not supported
+ for (auto &cmd : camNonMasterCmds) {
+ // Get a valid parameter value range
+ int32_t minVal, maxVal, step;
+ pCamNonMaster->getIntParameterRange(
+ cmd,
+ [&minVal, &maxVal, &step](int32_t val0, int32_t val1, int32_t val2) {
+ minVal = val0;
+ maxVal = val1;
+ step = val2;
+ }
+ );
- // Wait a moment
- sleep(1);
+ EvsResult result = EvsResult::OK;
+ if (cmd == CameraParam::ABSOLUTE_FOCUS) {
+ // Try to turn off auto-focus
+ int32_t val1 = 1;
+ pCamNonMaster->setIntParameter(CameraParam::AUTO_FOCUS, 0,
+ [&result, &val1](auto status, auto effectiveValue) {
+ result = status;
+ val1 = effectiveValue;
+ });
+ ASSERT_EQ(EvsResult::OK, result);
+ ASSERT_EQ(val1, 0);
+ }
- // Verify a change notification
- if (result == EvsResult::OK) {
- frameHandlerMaster->waitForEvent(InfoEventType::PARAMETER_CHANGED, aNotification);
- ASSERT_EQ(static_cast<InfoEventType>(aNotification.aType),
- InfoEventType::PARAMETER_CHANGED);
- ASSERT_EQ(static_cast<CameraParam>(aNotification.payload[0]),
- CameraParam::BRIGHTNESS);
+ // Try to program a parameter
+ val0 = minVal + (std::rand() % (maxVal - minVal));
+
+ // Rounding down
+ val0 = val0 - (val0 % step);
+ pCamNonMaster->setIntParameter(cmd, val0,
+ [&result, &val1](auto status, auto effectiveValue) {
+ result = status;
+ val1 = effectiveValue;
+ });
+ ASSERT_EQ(EvsResult::OK, result);
+
+ // Wait a moment
+ sleep(1);
+
+ // Non-master client expects to receive a parameter change notification
+ // whenever a master client adjusts it.
+ EvsEvent aNotification = {};
+
+ pCamNonMaster->getIntParameter(cmd,
+ [&result, &val1](auto status, auto value) {
+ result = status;
+ if (status == EvsResult::OK) {
+ val1 = value;
+ }
+ });
+ ASSERT_EQ(EvsResult::OK, result);
+ ASSERT_EQ(val0, val1) << "Values are not matched.";
+
+ // Verify a change notification
+ frameHandlerMaster->waitForEvent(EvsEventType::PARAMETER_CHANGED, aNotification);
+ ASSERT_EQ(EvsEventType::PARAMETER_CHANGED,
+ static_cast<EvsEventType>(aNotification.aType));
+ ASSERT_EQ(cmd,
+ static_cast<CameraParam>(aNotification.payload[0]));
ASSERT_EQ(val1,
static_cast<int32_t>(aNotification.payload[1]));
}
// New master retires from a master role
result = pCamNonMaster->unsetMaster();
- ASSERT_TRUE(result == EvsResult::OK);
+ ASSERT_EQ(EvsResult::OK, result);
// Shutdown
frameHandlerMaster->shutdown();
@@ -943,9 +1060,18 @@
TEST_F(EvsHidlTest, HighPriorityCameraClient) {
ALOGI("Starting HighPriorityCameraClient test");
+ if (mIsHwModule) {
+ // This test is not for HW module implementation.
+ return;
+ }
+
// Get the camera list
loadCameraList();
+ // Using a null stream configuration makes EVS use the default resolution and
+ // output format.
+ Stream nullCfg = {};
+
// Request exclusive access to the EVS display
sp<IEvsDisplay> pDisplay = pEnumerator->openDisplay();
ASSERT_NE(pDisplay, nullptr);
@@ -954,15 +1080,38 @@
for (auto&& cam: cameraInfo) {
// Create two clients
sp<IEvsCamera_1_1> pCam0 =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam0, nullptr);
sp<IEvsCamera_1_1> pCam1 =
- IEvsCamera_1_1::castFrom(pEnumerator->openCamera(cam.cameraId))
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, nullCfg))
.withDefault(nullptr);
ASSERT_NE(pCam1, nullptr);
+ // Get the parameter list; this test will use the first command in both
+ // lists.
+ std::vector<CameraParam> cam0Cmds, cam1Cmds;
+ pCam0->getParameterList([&cam0Cmds](hidl_vec<CameraParam> cmdList) {
+ cam0Cmds.reserve(cmdList.size());
+ for (auto &&cmd : cmdList) {
+ cam0Cmds.push_back(cmd);
+ }
+ }
+ );
+
+ pCam1->getParameterList([&cam1Cmds](hidl_vec<CameraParam> cmdList) {
+ cam1Cmds.reserve(cmdList.size());
+ for (auto &&cmd : cmdList) {
+ cam1Cmds.push_back(cmd);
+ }
+ }
+ );
+ if (cam0Cmds.size() < 1 || cam1Cmds.size() < 1) {
+ // Cannot execute this test.
+ return;
+ }
+
// Set up a frame receiver object which will fire up its own thread.
sp<FrameHandler> frameHandler0 = new FrameHandler(pCam0, cam,
pDisplay,
@@ -982,67 +1131,121 @@
frameHandler0->waitForFrameCount(1);
frameHandler1->waitForFrameCount(1);
- // Client 1 becomes a master and programs a brightness.
+ // Client 1 becomes a master and programs a parameter.
EvsResult result = EvsResult::OK;
- int32_t val0 = 100;
+ // Get a valid parameter value range
+ int32_t minVal, maxVal, step;
+ pCam1->getIntParameterRange(
+ cam1Cmds[0],
+ [&minVal, &maxVal, &step](int32_t val0, int32_t val1, int32_t val2) {
+ minVal = val0;
+ maxVal = val1;
+ step = val2;
+ }
+ );
+
+ if (cam1Cmds[0] == CameraParam::ABSOLUTE_FOCUS) {
+ // Try to turn off auto-focus
+ int32_t val1 = 0;
+ pCam1->getIntParameter(CameraParam::AUTO_FOCUS,
+ [&result, &val1](auto status, auto value) {
+ result = status;
+ if (status == EvsResult::OK) {
+ val1 = value;
+ }
+ });
+ if (val1 != 0) {
+ pCam1->setIntParameter(CameraParam::AUTO_FOCUS, 0,
+ [&result, &val1](auto status, auto effectiveValue) {
+ result = status;
+ val1 = effectiveValue;
+ });
+ ASSERT_EQ(EvsResult::OK, result);
+ ASSERT_EQ(val1, 0);
+ }
+ }
+
+ // Try to program a parameter with a random value [minVal, maxVal]
+ int32_t val0 = minVal + (std::rand() % (maxVal - minVal));
int32_t val1 = 0;
- result = pCam1->setMaster();
- ASSERT_TRUE(result == EvsResult::OK);
+ // Rounding down
+ val0 = val0 - (val0 % step);
- pCam1->setParameter(CameraParam::BRIGHTNESS, val0,
+ result = pCam1->setMaster();
+ ASSERT_EQ(EvsResult::OK, result);
+
+ pCam1->setIntParameter(cam1Cmds[0], val0,
[&result, &val1](auto status, auto effectiveValue) {
result = status;
val1 = effectiveValue;
});
- ASSERT_TRUE(result == EvsResult::OK ||
- result == EvsResult::INVALID_ARG);
-
+ ASSERT_EQ(EvsResult::OK, result);
// Verify a change notification
- InfoEventDesc aNotification = {};
- if (result == EvsResult::OK) {
- bool timeout =
- frameHandler0->waitForEvent(InfoEventType::PARAMETER_CHANGED, aNotification);
- ASSERT_FALSE(timeout) << "Expected event does not arrive";
- ASSERT_EQ(static_cast<InfoEventType>(aNotification.aType),
- InfoEventType::PARAMETER_CHANGED);
- ASSERT_EQ(static_cast<CameraParam>(aNotification.payload[0]),
- CameraParam::BRIGHTNESS);
- ASSERT_EQ(val1,
- static_cast<int32_t>(aNotification.payload[1]));
- }
+ EvsEvent aNotification = {};
+ bool timeout =
+ frameHandler0->waitForEvent(EvsEventType::PARAMETER_CHANGED, aNotification);
+ ASSERT_FALSE(timeout) << "Expected event does not arrive";
+ ASSERT_EQ(static_cast<EvsEventType>(aNotification.aType),
+ EvsEventType::PARAMETER_CHANGED);
+ ASSERT_EQ(static_cast<CameraParam>(aNotification.payload[0]),
+ cam1Cmds[0]);
+ ASSERT_EQ(val1,
+ static_cast<int32_t>(aNotification.payload[1]));
// Client 0 steals a master role
ASSERT_EQ(EvsResult::OK, pCam0->forceMaster(pDisplay));
- frameHandler1->waitForEvent(InfoEventType::MASTER_RELEASED, aNotification);
- ASSERT_EQ(static_cast<InfoEventType>(aNotification.aType),
- InfoEventType::MASTER_RELEASED);
+ frameHandler1->waitForEvent(EvsEventType::MASTER_RELEASED, aNotification);
+ ASSERT_EQ(static_cast<EvsEventType>(aNotification.aType),
+ EvsEventType::MASTER_RELEASED);
- // Client 0 programs a brightness
- val0 = 50;
+ // Client 0 programs a parameter
+ val0 = minVal + (std::rand() % (maxVal - minVal));
val1 = 0;
- pCam0->setParameter(CameraParam::BRIGHTNESS, val0,
+
+ // Rounding down
+ val0 = val0 - (val0 % step);
+
+ if (cam0Cmds[0] == CameraParam::ABSOLUTE_FOCUS) {
+ // Try to turn off auto-focus
+ int32_t val1 = 0;
+ pCam0->getIntParameter(CameraParam::AUTO_FOCUS,
+ [&result, &val1](auto status, auto value) {
+ result = status;
+ if (status == EvsResult::OK) {
+ val1 = value;
+ }
+ });
+ if (val1 != 0) {
+ pCam0->setIntParameter(CameraParam::AUTO_FOCUS, 0,
+ [&result, &val1](auto status, auto effectiveValue) {
+ result = status;
+ val1 = effectiveValue;
+ });
+ ASSERT_EQ(EvsResult::OK, result);
+ ASSERT_EQ(val1, 0);
+ }
+ }
+
+ pCam0->setIntParameter(cam0Cmds[0], val0,
[&result, &val1](auto status, auto effectiveValue) {
result = status;
val1 = effectiveValue;
});
- ASSERT_TRUE(result == EvsResult::OK ||
- result == EvsResult::INVALID_ARG);
+ ASSERT_EQ(EvsResult::OK, result);
// Verify a change notification
- if (result == EvsResult::OK) {
- bool timeout =
- frameHandler1->waitForEvent(InfoEventType::PARAMETER_CHANGED, aNotification);
- ASSERT_FALSE(timeout) << "Expected event does not arrive";
- ASSERT_EQ(static_cast<InfoEventType>(aNotification.aType),
- InfoEventType::PARAMETER_CHANGED);
- ASSERT_EQ(static_cast<CameraParam>(aNotification.payload[0]),
- CameraParam::BRIGHTNESS);
- ASSERT_EQ(val1,
- static_cast<int32_t>(aNotification.payload[1]));
- }
+ timeout =
+ frameHandler1->waitForEvent(EvsEventType::PARAMETER_CHANGED, aNotification);
+    ASSERT_FALSE(timeout) << "Expected event did not arrive";
+ ASSERT_EQ(static_cast<EvsEventType>(aNotification.aType),
+ EvsEventType::PARAMETER_CHANGED);
+ ASSERT_EQ(static_cast<CameraParam>(aNotification.payload[0]),
+ cam0Cmds[0]);
+ ASSERT_EQ(val1,
+ static_cast<int32_t>(aNotification.payload[1]));
// Turn off the display (yes, before the stream stops -- it should be handled)
pDisplay->setDisplayState(DisplayState::NOT_VISIBLE);
@@ -1061,6 +1264,248 @@
}
+/*
+ * CameraUseStreamConfigToDisplay:
+ * End-to-end test of data flowing from the camera to the display. Similar to
+ * the CameraToDisplayRoundTrip test case, but this one retrieves the available
+ * stream configurations from EVS and uses one of them to start a video stream.
+ */
+TEST_F(EvsHidlTest, CameraUseStreamConfigToDisplay) {
+ ALOGI("Starting CameraUseStreamConfigToDisplay test");
+
+ // Get the camera list
+ loadCameraList();
+
+ // Request exclusive access to the EVS display
+ sp<IEvsDisplay> pDisplay = pEnumerator->openDisplay();
+ ASSERT_NE(pDisplay, nullptr);
+
+ // Test each reported camera
+ for (auto&& cam: cameraInfo) {
+        // Choose a configuration whose frame rate is at least minReqFps.
+ Stream targetCfg = {};
+ const int32_t minReqFps = 15;
+ int32_t maxArea = 0;
+ camera_metadata_entry_t streamCfgs;
+ bool foundCfg = false;
+ if (!find_camera_metadata_entry(
+ reinterpret_cast<camera_metadata_t *>(cam.metadata.data()),
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ &streamCfgs)) {
+ // Stream configurations are found in metadata
+ RawStreamConfig *ptr = reinterpret_cast<RawStreamConfig *>(streamCfgs.data.i32);
+ for (unsigned idx = 0; idx < streamCfgs.count; idx += kStreamCfgSz) {
+ if (ptr->direction == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ ptr->format == HAL_PIXEL_FORMAT_RGBA_8888) {
+
+ if (ptr->width * ptr->height > maxArea &&
+ ptr->framerate >= minReqFps) {
+ targetCfg.width = ptr->width;
+ targetCfg.height = ptr->height;
+
+ maxArea = ptr->width * ptr->height;
+ foundCfg = true;
+ }
+ }
+ ++ptr;
+ }
+ }
+ targetCfg.format =
+ static_cast<PixelFormat>(HAL_PIXEL_FORMAT_RGBA_8888);
+
+ if (!foundCfg) {
+ // Current EVS camera does not provide stream configurations in the
+ // metadata.
+ continue;
+ }
+
+ sp<IEvsCamera_1_1> pCam =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, targetCfg))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam, nullptr);
+
+ // Set up a frame receiver object which will fire up its own thread.
+ sp<FrameHandler> frameHandler = new FrameHandler(pCam, cam,
+ pDisplay,
+ FrameHandler::eAutoReturn);
+
+
+ // Activate the display
+ pDisplay->setDisplayState(DisplayState::VISIBLE_ON_NEXT_FRAME);
+
+ // Start the camera's video stream
+ bool startResult = frameHandler->startStream();
+ ASSERT_TRUE(startResult);
+
+ // Wait a while to let the data flow
+ static const int kSecondsToWait = 5;
+ const int streamTimeMs = kSecondsToWait * kSecondsToMilliseconds -
+ kMaxStreamStartMilliseconds;
+ const unsigned minimumFramesExpected = streamTimeMs * kMinimumFramesPerSecond /
+ kSecondsToMilliseconds;
+ sleep(kSecondsToWait);
+ unsigned framesReceived = 0;
+ unsigned framesDisplayed = 0;
+ frameHandler->getFramesCounters(&framesReceived, &framesDisplayed);
+ EXPECT_EQ(framesReceived, framesDisplayed);
+ EXPECT_GE(framesDisplayed, minimumFramesExpected);
+
+ // Turn off the display (yes, before the stream stops -- it should be handled)
+ pDisplay->setDisplayState(DisplayState::NOT_VISIBLE);
+
+ // Shut down the streamer
+ frameHandler->shutdown();
+
+ // Explicitly release the camera
+ pEnumerator->closeCamera(pCam);
+ }
+
+ // Explicitly release the display
+ pEnumerator->closeDisplay(pDisplay);
+}
+
+
+/*
+ * MultiCameraStreamUseConfig:
+ * Verify that each client can start and stop video streams on the same
+ * underlying camera with the same configuration.
+ */
+TEST_F(EvsHidlTest, MultiCameraStreamUseConfig) {
+    ALOGI("Starting MultiCameraStreamUseConfig test");
+
+ if (mIsHwModule) {
+ // This test is not for HW module implementation.
+ return;
+ }
+
+ // Get the camera list
+ loadCameraList();
+
+ // Test each reported camera
+ for (auto&& cam: cameraInfo) {
+        // Choose a configuration whose frame rate is at least minReqFps.
+ Stream targetCfg = {};
+ const int32_t minReqFps = 15;
+ int32_t maxArea = 0;
+ camera_metadata_entry_t streamCfgs;
+ bool foundCfg = false;
+ if (!find_camera_metadata_entry(
+ reinterpret_cast<camera_metadata_t *>(cam.metadata.data()),
+ ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ &streamCfgs)) {
+ // Stream configurations are found in metadata
+ RawStreamConfig *ptr = reinterpret_cast<RawStreamConfig *>(streamCfgs.data.i32);
+ for (unsigned idx = 0; idx < streamCfgs.count; idx += kStreamCfgSz) {
+ if (ptr->direction == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+ ptr->format == HAL_PIXEL_FORMAT_RGBA_8888) {
+
+ if (ptr->width * ptr->height > maxArea &&
+ ptr->framerate >= minReqFps) {
+ targetCfg.width = ptr->width;
+ targetCfg.height = ptr->height;
+
+ maxArea = ptr->width * ptr->height;
+ foundCfg = true;
+ }
+ }
+ ++ptr;
+ }
+ }
+ targetCfg.format =
+ static_cast<PixelFormat>(HAL_PIXEL_FORMAT_RGBA_8888);
+
+ if (!foundCfg) {
+ ALOGI("Device %s does not provide a list of supported stream configurations, skipped",
+ cam.v1.cameraId.c_str());
+
+ continue;
+ }
+
+ // Create the first camera client with a selected stream configuration.
+ sp<IEvsCamera_1_1> pCam0 =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, targetCfg))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam0, nullptr);
+
+        // Try to create a second camera client with a different stream
+        // configuration.
+ int32_t id = targetCfg.id;
+ targetCfg.id += 1; // EVS manager sees only the stream id.
+ sp<IEvsCamera_1_1> pCam1 =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, targetCfg))
+ .withDefault(nullptr);
+ ASSERT_EQ(pCam1, nullptr);
+
+        // Try again with the same stream configuration.
+ targetCfg.id = id;
+ pCam1 =
+ IEvsCamera_1_1::castFrom(pEnumerator->openCamera_1_1(cam.v1.cameraId, targetCfg))
+ .withDefault(nullptr);
+ ASSERT_NE(pCam1, nullptr);
+
+        // Set up per-client frame receiver objects, each of which will fire up its own thread
+ sp<FrameHandler> frameHandler0 = new FrameHandler(pCam0, cam,
+ nullptr,
+ FrameHandler::eAutoReturn);
+ ASSERT_NE(frameHandler0, nullptr);
+
+ sp<FrameHandler> frameHandler1 = new FrameHandler(pCam1, cam,
+ nullptr,
+ FrameHandler::eAutoReturn);
+ ASSERT_NE(frameHandler1, nullptr);
+
+        // Start the camera's video stream via both clients
+ bool startResult = false;
+ startResult = frameHandler0->startStream() &&
+ frameHandler1->startStream();
+ ASSERT_TRUE(startResult);
+
+ // Ensure the stream starts
+ frameHandler0->waitForFrameCount(1);
+ frameHandler1->waitForFrameCount(1);
+
+ nsecs_t firstFrame = systemTime(SYSTEM_TIME_MONOTONIC);
+
+ // Wait a bit, then ensure both clients get at least the required minimum number of frames
+ sleep(5);
+ nsecs_t end = systemTime(SYSTEM_TIME_MONOTONIC);
+ unsigned framesReceived0 = 0, framesReceived1 = 0;
+ frameHandler0->getFramesCounters(&framesReceived0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceived1, nullptr);
+ framesReceived0 = framesReceived0 - 1; // Back out the first frame we already waited for
+ framesReceived1 = framesReceived1 - 1; // Back out the first frame we already waited for
+ nsecs_t runTime = end - firstFrame;
+ float framesPerSecond0 = framesReceived0 / (runTime * kNanoToSeconds);
+ float framesPerSecond1 = framesReceived1 / (runTime * kNanoToSeconds);
+ ALOGI("Measured camera rate %3.2f fps and %3.2f fps", framesPerSecond0, framesPerSecond1);
+ EXPECT_GE(framesPerSecond0, kMinimumFramesPerSecond);
+ EXPECT_GE(framesPerSecond1, kMinimumFramesPerSecond);
+
+        // Shut down one client
+ frameHandler0->shutdown();
+
+ // Read frame counters again
+ frameHandler0->getFramesCounters(&framesReceived0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceived1, nullptr);
+
+ // Wait a bit again
+ sleep(5);
+ unsigned framesReceivedAfterStop0 = 0, framesReceivedAfterStop1 = 0;
+ frameHandler0->getFramesCounters(&framesReceivedAfterStop0, nullptr);
+ frameHandler1->getFramesCounters(&framesReceivedAfterStop1, nullptr);
+ EXPECT_EQ(framesReceived0, framesReceivedAfterStop0);
+ EXPECT_LT(framesReceived1, framesReceivedAfterStop1);
+
+        // Shut down the other client
+ frameHandler1->shutdown();
+
+ // Explicitly release the camera
+ pEnumerator->closeCamera(pCam0);
+ pEnumerator->closeCamera(pCam1);
+ }
+}
+
+
int main(int argc, char** argv) {
::testing::AddGlobalTestEnvironment(EvsHidlEnvironment::Instance());
::testing::InitGoogleTest(&argc, argv);
diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
index 4a42d79..094a372 100644
--- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
+++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/DefaultConfig.h
@@ -85,6 +85,28 @@
0x0666 | VehiclePropertyGroup::VENDOR | VehicleArea::GLOBAL | VehiclePropertyType::MIXED;
/**
+ * This property is used for test purposes to set a property's value from the vehicle side.
+ * For example: mocking a hard button press that triggers an HVAC fan speed change.
+ * Android sets kSetIntPropertyFromVehcileForTest with an integer array {HVAC_FAN_SPEED, fan
+ * speed value} and a long value that indicates the timestamp of the event.
+ * It only works with integer type properties.
+ */
+const int32_t kSetIntPropertyFromVehcileForTest =
+ 0x1112 | VehiclePropertyGroup::VENDOR | VehicleArea::GLOBAL | VehiclePropertyType::MIXED;
+/**
+ * This property is used for test purposes to set a property's value from the vehicle side.
+ * It only works with float type properties.
+ */
+const int32_t kSetFloatPropertyFromVehcileForTest =
+ 0x1113 | VehiclePropertyGroup::VENDOR | VehicleArea::GLOBAL | VehiclePropertyType::MIXED;
+/**
+ * This property is used for test purposes to set a property's value from the vehicle side.
+ * It only works with boolean type properties.
+ */
+const int32_t kSetBooleanPropertyFromVehcileForTest =
+ 0x1114 | VehiclePropertyGroup::VENDOR | VehicleArea::GLOBAL | VehiclePropertyType::MIXED;
+
+/**
* This property is used for test purpose. End to end tests use this property to test set and get
* method for MIXED type properties.
*/
@@ -636,6 +658,37 @@
.prop = kGenerateFakeDataControllingProperty,
.access = VehiclePropertyAccess::WRITE,
.changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ .configArray = {1, 0, 0, 2, 0, 0, 0, 0, 0},
+ },
+ },
+
+ {
+ .config =
+ {
+ .prop = kSetIntPropertyFromVehcileForTest,
+ .access = VehiclePropertyAccess::WRITE,
+ .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ .configArray = {0, 0, 0, 2, 1, 0, 0, 0, 0},
+ },
+ },
+
+ {
+ .config =
+ {
+ .prop = kSetFloatPropertyFromVehcileForTest,
+ .access = VehiclePropertyAccess::WRITE,
+ .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ .configArray = {0, 0, 1, 0, 1, 0, 1, 0, 0},
+ },
+ },
+
+ {
+ .config =
+ {
+ .prop = kSetBooleanPropertyFromVehcileForTest,
+ .access = VehiclePropertyAccess::WRITE,
+ .changeMode = VehiclePropertyChangeMode::ON_CHANGE,
+ .configArray = {0, 1, 1, 0, 1, 0, 0, 0, 0},
},
},
@@ -733,7 +786,7 @@
.initialValue = {.int32Values = {toInt(VehicleApPowerStateReq::ON), 0}}},
{.config = {.prop = toInt(VehicleProperty::AP_POWER_STATE_REPORT),
- .access = VehiclePropertyAccess::WRITE,
+ .access = VehiclePropertyAccess::READ_WRITE,
.changeMode = VehiclePropertyChangeMode::ON_CHANGE},
.initialValue = {.int32Values = {toInt(VehicleApPowerStateReport::WAIT_FOR_VHAL), 0}}},
diff --git a/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp b/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp
index b4f1f07..9d249be 100644
--- a/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp
+++ b/automotive/vehicle/2.0/default/impl/vhal_v2_0/EmulatedVehicleHal.cpp
@@ -131,6 +131,36 @@
StatusCode EmulatedVehicleHal::set(const VehiclePropValue& propValue) {
static constexpr bool shouldUpdateStatus = false;
+    // Set the value from the vehicle side; used in end-to-end tests.
+ if (propValue.prop == kSetIntPropertyFromVehcileForTest) {
+ auto mockValue = createVehiclePropValue(VehiclePropertyType::INT32, 1);
+ mockValue->prop = propValue.value.int32Values[0];
+ mockValue->value.int32Values[0] = propValue.value.int32Values[1];
+ mockValue->timestamp = propValue.value.int64Values[0];
+ mockValue->areaId = propValue.areaId;
+ setPropertyFromVehicle(*mockValue);
+ return StatusCode::OK;
+ }
+
+ if (propValue.prop == kSetFloatPropertyFromVehcileForTest) {
+ auto mockValue = createVehiclePropValue(VehiclePropertyType::FLOAT, 1);
+ mockValue->prop = propValue.value.int32Values[0];
+ mockValue->value.floatValues[0] = propValue.value.floatValues[0];
+ mockValue->timestamp = propValue.value.int64Values[0];
+ mockValue->areaId = propValue.areaId;
+ setPropertyFromVehicle(*mockValue);
+ return StatusCode::OK;
+ }
+ if (propValue.prop == kSetBooleanPropertyFromVehcileForTest) {
+ auto mockValue = createVehiclePropValue(VehiclePropertyType::BOOLEAN, 1);
+ mockValue->prop = propValue.value.int32Values[1];
+ mockValue->value.int32Values[0] = propValue.value.int32Values[0];
+ mockValue->timestamp = propValue.value.int64Values[0];
+ mockValue->areaId = propValue.areaId;
+ setPropertyFromVehicle(*mockValue);
+ return StatusCode::OK;
+ }
+
if (propValue.prop == kGenerateFakeDataControllingProperty) {
StatusCode status = handleGenerateFakeDataRequest(propValue);
if (status != StatusCode::OK) {
@@ -198,12 +228,19 @@
return StatusCode::NOT_AVAILABLE;
}
- if (!mPropStore->writeValue(propValue, shouldUpdateStatus)) {
- return StatusCode::INVALID_ARG;
+ /**
+     * After checking all conditions (e.g. that the property is available), a real VHAL would
+     * send the event to the car ECU to take action.
+     * The emulated Google HAL just adds a timestamp to the value and triggers the callback to Android.
+ */
+ VehiclePropValuePtr updatedPropValue = getValuePool()->obtain(propValue);
+ updatedPropValue->timestamp = elapsedRealtimeNano();
+ if (!mPropStore->writeValue(*updatedPropValue, shouldUpdateStatus)) {
+ return StatusCode::INTERNAL_ERROR;
}
- getEmulatorOrDie()->doSetValueFromClient(propValue);
- doHalEvent(getValuePool()->obtain(propValue));
+ getEmulatorOrDie()->doSetValueFromClient(*updatedPropValue);
+ doHalEvent(std::move(updatedPropValue));
return StatusCode::OK;
}
diff --git a/automotive/vehicle/2.0/default/tests/VehicleObjectPool_test.cpp b/automotive/vehicle/2.0/default/tests/VehicleObjectPool_test.cpp
index a291351..4e3ade1 100644
--- a/automotive/vehicle/2.0/default/tests/VehicleObjectPool_test.cpp
+++ b/automotive/vehicle/2.0/default/tests/VehicleObjectPool_test.cpp
@@ -57,11 +57,11 @@
};
TEST_F(VehicleObjectPoolTest, valuePoolBasicCorrectness) {
- void* raw = valuePool->obtain(VehiclePropertyType::INT32).get();
+ auto value = valuePool->obtain(VehiclePropertyType::INT32);
// At this point, v1 should be recycled and the only object in the pool.
- ASSERT_EQ(raw, valuePool->obtain(VehiclePropertyType::INT32).get());
+ ASSERT_EQ(value.get(), valuePool->obtain(VehiclePropertyType::INT32).get());
// Obtaining value of another type - should return a new object
- ASSERT_NE(raw, valuePool->obtain(VehiclePropertyType::FLOAT).get());
+ ASSERT_NE(value.get(), valuePool->obtain(VehiclePropertyType::FLOAT).get());
ASSERT_EQ(3u, stats->Obtained);
ASSERT_EQ(2u, stats->Created);
diff --git a/automotive/vehicle/2.0/types.hal b/automotive/vehicle/2.0/types.hal
index 8c84c0a..1355d9f 100644
--- a/automotive/vehicle/2.0/types.hal
+++ b/automotive/vehicle/2.0/types.hal
@@ -1310,7 +1310,7 @@
*
* @change_mode VehiclePropertyChangeMode:ON_CHANGE
- * @access VehiclePropertyAccess:WRITE
+ * @access VehiclePropertyAccess:READ_WRITE
*/
AP_POWER_STATE_REPORT = (
0x0A01
@@ -2537,7 +2537,7 @@
* power controller must change power state to this state to shutdown
* system.
*
- * int32Values[1] : one of enum_vehicle_ap_power_state_shutdown_param_type
+ * int32Values[1] : one of VehicleApPowerStateShutdownParam
*
* SHUTDOWN_PRPARE may be requested from either WAIT_FOR_VHAL or ON states.
*/
diff --git a/bluetooth/audio/2.0/default/session/BluetoothAudioSession.h b/bluetooth/audio/2.0/default/session/BluetoothAudioSession.h
index 85e8742..838d1cc 100644
--- a/bluetooth/audio/2.0/default/session/BluetoothAudioSession.h
+++ b/bluetooth/audio/2.0/default/session/BluetoothAudioSession.h
@@ -156,8 +156,9 @@
static constexpr PcmParameters kInvalidPcmParameters = {
.sampleRate = SampleRate::RATE_UNKNOWN,
+ .channelMode = ChannelMode::UNKNOWN,
.bitsPerSample = BitsPerSample::BITS_UNKNOWN,
- .channelMode = ChannelMode::UNKNOWN};
+ };
// can't be constexpr because of non-literal type
static const CodecConfiguration kInvalidCodecConfiguration;
diff --git a/bluetooth/audio/2.0/default/session/BluetoothAudioSupportedCodecsDB.cpp b/bluetooth/audio/2.0/default/session/BluetoothAudioSupportedCodecsDB.cpp
index 292e28b..6ea61e1 100644
--- a/bluetooth/audio/2.0/default/session/BluetoothAudioSupportedCodecsDB.cpp
+++ b/bluetooth/audio/2.0/default/session/BluetoothAudioSupportedCodecsDB.cpp
@@ -94,16 +94,18 @@
static const AptxParameters kDefaultOffloadAptxCapability = {
.sampleRate = static_cast<SampleRate>(SampleRate::RATE_44100 |
SampleRate::RATE_48000),
+ .channelMode = ChannelMode::STEREO,
.bitsPerSample = BitsPerSample::BITS_16,
- .channelMode = ChannelMode::STEREO};
+};
// aptX HD: mSampleRate:(44100|48000), mBitsPerSample:(24),
// mChannelMode:(STEREO)
static const AptxParameters kDefaultOffloadAptxHdCapability = {
.sampleRate = static_cast<SampleRate>(SampleRate::RATE_44100 |
SampleRate::RATE_48000),
+ .channelMode = ChannelMode::STEREO,
.bitsPerSample = BitsPerSample::BITS_24,
- .channelMode = ChannelMode::STEREO};
+};
const std::vector<CodecCapabilities> kDefaultOffloadA2dpCodecCapabilities = {
{.codecType = CodecType::SBC, .capabilities = {}},
diff --git a/boot/1.1/Android.bp b/boot/1.1/Android.bp
new file mode 100644
index 0000000..6a8d57a
--- /dev/null
+++ b/boot/1.1/Android.bp
@@ -0,0 +1,18 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.boot@1.1",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "types.hal",
+ "IBootControl.hal",
+ ],
+ interfaces: [
+ "android.hardware.boot@1.0",
+ "android.hidl.base@1.0",
+ ],
+ gen_java: true,
+}
diff --git a/boot/1.1/IBootControl.hal b/boot/1.1/IBootControl.hal
new file mode 100644
index 0000000..939dfb3
--- /dev/null
+++ b/boot/1.1/IBootControl.hal
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.boot@1.1;
+
+import @1.0::IBootControl;
+
+interface IBootControl extends @1.0::IBootControl {
+ /**
+ * Sets whether a snapshot-merge of any dynamic partition is in progress.
+ *
+ * After the merge status is set to a given value, subsequent calls to
+ * getSnapshotMergeStatus must return the set value.
+ *
+ * The merge status must be persistent across reboots. That is, getSnapshotMergeStatus
+ * must return the same value after a reboot if the merge status is not altered in any way
+ * (e.g. set by setSnapshotMergeStatus or set to CANCELLED by bootloader).
+ *
+ * Read/write access to the merge status must be atomic. When the HAL is processing a
+ * setSnapshotMergeStatus call, all subsequent calls to getSnapshotMergeStatus must block until
+ * setSnapshotMergeStatus has returned.
+ *
+     * A MERGING state indicates that dynamic partitions are partially composed of blocks in the
+ * userdata partition.
+ *
+ * When the merge status is set to MERGING, the following operations must be prohibited from the
+ * bootloader:
+ * - Flashing or erasing "userdata" or "metadata".
+ *
+ * The following operations may be prohibited when the status is set to MERGING. If not
+ * prohibited, it is recommended that the user receive a warning.
+ * - Changing the active slot (e.g. via "fastboot set_active")
+ *
+ * @param status Merge status.
+ *
+ * @return success True on success, false otherwise.
+ */
+ setSnapshotMergeStatus(MergeStatus status) generates (bool success);
+
+ /**
+ * Returns whether a snapshot-merge of any dynamic partition is in progress.
+ *
+ * This function must return the merge status set by the last setSnapshotMergeStatus call and
+ * recorded by the bootloader with one exception. If the partitions are being flashed from the
+ * bootloader such that the pending merge must be canceled (for example, if the super partition
+ * is being flashed), this function must return CANCELLED.
+ *
+ * @return status Merge status.
+ */
+ getSnapshotMergeStatus() generates (MergeStatus status);
+};
+
diff --git a/boot/1.1/default/Android.bp b/boot/1.1/default/Android.bp
new file mode 100644
index 0000000..dca5c26
--- /dev/null
+++ b/boot/1.1/default/Android.bp
@@ -0,0 +1,43 @@
+cc_library_shared {
+ name: "android.hardware.boot@1.1-impl",
+ defaults: [
+ "hidl_defaults",
+ "libboot_control_defaults",
+ ],
+ relative_install_path: "hw",
+ vendor: true,
+ recovery_available: true,
+ srcs: ["BootControl.cpp"],
+
+ shared_libs: [
+ "liblog",
+ "libhidlbase",
+ "libhardware",
+ "libutils",
+ "android.hardware.boot@1.0",
+ "android.hardware.boot@1.1",
+ ],
+ static_libs: [
+ "libboot_control",
+ "libfstab",
+ ],
+}
+
+cc_binary {
+ name: "android.hardware.boot@1.1-service",
+ defaults: ["hidl_defaults"],
+ relative_install_path: "hw",
+ vendor: true,
+ init_rc: ["android.hardware.boot@1.1-service.rc"],
+ srcs: ["service.cpp"],
+
+ shared_libs: [
+ "liblog",
+ "libhardware",
+ "libhidlbase",
+ "libutils",
+ "android.hardware.boot@1.0",
+ "android.hardware.boot@1.1",
+ ],
+
+}
diff --git a/boot/1.1/default/BootControl.cpp b/boot/1.1/default/BootControl.cpp
new file mode 100644
index 0000000..c9c62a4
--- /dev/null
+++ b/boot/1.1/default/BootControl.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "android.hardware.boot@1.1-impl"
+
+#include <memory>
+
+#include <log/log.h>
+
+#include "BootControl.h"
+
+namespace android {
+namespace hardware {
+namespace boot {
+namespace V1_1 {
+namespace implementation {
+
+using ::android::hardware::boot::V1_0::CommandResult;
+
+bool BootControl::Init() {
+ return impl_.Init();
+}
+
+// Methods from ::android::hardware::boot::V1_0::IBootControl follow.
+Return<uint32_t> BootControl::getNumberSlots() {
+ return impl_.GetNumberSlots();
+}
+
+Return<uint32_t> BootControl::getCurrentSlot() {
+ return impl_.GetCurrentSlot();
+}
+
+Return<void> BootControl::markBootSuccessful(markBootSuccessful_cb _hidl_cb) {
+ struct CommandResult cr;
+ if (impl_.MarkBootSuccessful()) {
+ cr.success = true;
+ cr.errMsg = "Success";
+ } else {
+ cr.success = false;
+ cr.errMsg = "Operation failed";
+ }
+ _hidl_cb(cr);
+ return Void();
+}
+
+Return<void> BootControl::setActiveBootSlot(uint32_t slot, setActiveBootSlot_cb _hidl_cb) {
+ struct CommandResult cr;
+ if (impl_.SetActiveBootSlot(slot)) {
+ cr.success = true;
+ cr.errMsg = "Success";
+ } else {
+ cr.success = false;
+ cr.errMsg = "Operation failed";
+ }
+ _hidl_cb(cr);
+ return Void();
+}
+
+Return<void> BootControl::setSlotAsUnbootable(uint32_t slot, setSlotAsUnbootable_cb _hidl_cb) {
+ struct CommandResult cr;
+ if (impl_.SetSlotAsUnbootable(slot)) {
+ cr.success = true;
+ cr.errMsg = "Success";
+ } else {
+ cr.success = false;
+ cr.errMsg = "Operation failed";
+ }
+ _hidl_cb(cr);
+ return Void();
+}
+
+Return<BoolResult> BootControl::isSlotBootable(uint32_t slot) {
+ if (!impl_.IsValidSlot(slot)) {
+ return BoolResult::INVALID_SLOT;
+ }
+ return impl_.IsSlotBootable(slot) ? BoolResult::TRUE : BoolResult::FALSE;
+}
+
+Return<BoolResult> BootControl::isSlotMarkedSuccessful(uint32_t slot) {
+ if (!impl_.IsValidSlot(slot)) {
+ return BoolResult::INVALID_SLOT;
+ }
+ return impl_.IsSlotMarkedSuccessful(slot) ? BoolResult::TRUE : BoolResult::FALSE;
+}
+
+Return<void> BootControl::getSuffix(uint32_t slot, getSuffix_cb _hidl_cb) {
+ hidl_string ans;
+ const char* suffix = impl_.GetSuffix(slot);
+ if (suffix) {
+ ans = suffix;
+ }
+ _hidl_cb(ans);
+ return Void();
+}
+
+Return<bool> BootControl::setSnapshotMergeStatus(MergeStatus status) {
+ return impl_.SetSnapshotMergeStatus(status);
+}
+
+Return<MergeStatus> BootControl::getSnapshotMergeStatus() {
+ return impl_.GetSnapshotMergeStatus();
+}
+
+IBootControl* HIDL_FETCH_IBootControl(const char* /* hal */) {
+ auto module = std::make_unique<BootControl>();
+ if (!module->Init()) {
+ ALOGE("Could not initialize BootControl module");
+ return nullptr;
+ }
+ return module.release();
+}
+
+} // namespace implementation
+} // namespace V1_1
+} // namespace boot
+} // namespace hardware
+} // namespace android
diff --git a/boot/1.1/default/BootControl.h b/boot/1.1/default/BootControl.h
new file mode 100644
index 0000000..75511b6
--- /dev/null
+++ b/boot/1.1/default/BootControl.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/hardware/boot/1.1/IBootControl.h>
+#include <hidl/MQDescriptor.h>
+#include <hidl/Status.h>
+#include <libboot_control/libboot_control.h>
+
+namespace android {
+namespace hardware {
+namespace boot {
+namespace V1_1 {
+namespace implementation {
+
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::boot::V1_0::BoolResult;
+using ::android::hardware::boot::V1_1::IBootControl;
+using ::android::hardware::boot::V1_1::MergeStatus;
+
+class BootControl : public IBootControl {
+ public:
+ bool Init();
+
+ // Methods from ::android::hardware::boot::V1_0::IBootControl follow.
+ Return<uint32_t> getNumberSlots() override;
+ Return<uint32_t> getCurrentSlot() override;
+ Return<void> markBootSuccessful(markBootSuccessful_cb _hidl_cb) override;
+ Return<void> setActiveBootSlot(uint32_t slot, setActiveBootSlot_cb _hidl_cb) override;
+ Return<void> setSlotAsUnbootable(uint32_t slot, setSlotAsUnbootable_cb _hidl_cb) override;
+ Return<BoolResult> isSlotBootable(uint32_t slot) override;
+ Return<BoolResult> isSlotMarkedSuccessful(uint32_t slot) override;
+ Return<void> getSuffix(uint32_t slot, getSuffix_cb _hidl_cb) override;
+
+ // Methods from ::android::hardware::boot::V1_1::IBootControl follow.
+ Return<bool> setSnapshotMergeStatus(MergeStatus status) override;
+ Return<MergeStatus> getSnapshotMergeStatus() override;
+
+ private:
+ android::bootable::BootControl impl_;
+};
+
+extern "C" IBootControl* HIDL_FETCH_IBootControl(const char* name);
+
+} // namespace implementation
+} // namespace V1_1
+} // namespace boot
+} // namespace hardware
+} // namespace android
diff --git a/boot/1.1/default/android.hardware.boot@1.1-service.rc b/boot/1.1/default/android.hardware.boot@1.1-service.rc
new file mode 100644
index 0000000..83fa9d0
--- /dev/null
+++ b/boot/1.1/default/android.hardware.boot@1.1-service.rc
@@ -0,0 +1,6 @@
+service vendor.boot-hal-1-1 /vendor/bin/hw/android.hardware.boot@1.1-service
+ interface android.hardware.boot@1.0::IBootControl default
+ interface android.hardware.boot@1.1::IBootControl default
+ class early_hal
+ user root
+ group root
diff --git a/boot/1.1/default/service.cpp b/boot/1.1/default/service.cpp
new file mode 100644
index 0000000..b24b464
--- /dev/null
+++ b/boot/1.1/default/service.cpp
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "android.hardware.boot@1.1-service"
+
+#include <android/hardware/boot/1.1/IBootControl.h>
+#include <hidl/LegacySupport.h>
+
+using android::hardware::defaultPassthroughServiceImplementation;
+using ::android::hardware::boot::V1_1::IBootControl;
+
+int main(int /* argc */, char* /* argv */[]) {
+ return defaultPassthroughServiceImplementation<IBootControl>();
+}
diff --git a/boot/1.1/types.hal b/boot/1.1/types.hal
new file mode 100644
index 0000000..6346078
--- /dev/null
+++ b/boot/1.1/types.hal
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.boot@1.1;
+
+enum MergeStatus : int32_t {
+ /**
+ * No snapshot or merge is in progress.
+ */
+ NONE = 0,
+
+ /**
+ * The merge status could not be determined.
+ */
+ UNKNOWN,
+
+ /**
+ * Partitions are being snapshotted, but no merge has been started.
+ */
+ SNAPSHOTTED,
+
+ /**
+     * At least one partition has a merge in progress.
+ */
+ MERGING,
+
+ /**
+ * A merge was in progress, but it was canceled by the bootloader.
+ */
+ CANCELLED,
+};
diff --git a/boot/1.1/vts/functional/Android.bp b/boot/1.1/vts/functional/Android.bp
new file mode 100644
index 0000000..49ea09a
--- /dev/null
+++ b/boot/1.1/vts/functional/Android.bp
@@ -0,0 +1,28 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+ name: "VtsHalBootV1_1TargetTest",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: ["VtsHalBootV1_1TargetTest.cpp"],
+ static_libs: [
+ "android.hardware.boot@1.0",
+ "android.hardware.boot@1.1",
+ "libgmock",
+ ],
+ test_suites: ["device-tests"],
+}
+
diff --git a/boot/1.1/vts/functional/VtsHalBootV1_1TargetTest.cpp b/boot/1.1/vts/functional/VtsHalBootV1_1TargetTest.cpp
new file mode 100644
index 0000000..fba9a5e
--- /dev/null
+++ b/boot/1.1/vts/functional/VtsHalBootV1_1TargetTest.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "boot_hidl_hal_test"
+
+#include <vector>
+
+#include <android-base/logging.h>
+#include <android/hardware/boot/1.1/IBootControl.h>
+#include <android/hardware/boot/1.1/types.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
+
+#include <unistd.h>
+
+using ::android::sp;
+using ::android::hardware::hidl_enum_range;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::boot::V1_1::IBootControl;
+using ::android::hardware::boot::V1_1::MergeStatus;
+using ::testing::Contains;
+
+class BootHidlTest : public testing::TestWithParam<std::string> {
+ public:
+ virtual void SetUp() override {
+ boot = IBootControl::getService(GetParam());
+ ASSERT_NE(boot, nullptr);
+
+ LOG(INFO) << "Test is remote " << boot->isRemote();
+ }
+
+ sp<IBootControl> boot;
+};
+
+static std::vector<MergeStatus> ValidMergeStatusValues() {
+ std::vector<MergeStatus> values;
+ for (const auto value : hidl_enum_range<MergeStatus>()) {
+ if (value == MergeStatus::UNKNOWN) {
+ continue;
+ }
+ values.push_back(value);
+ }
+ return values;
+}
+
+/**
+ * Ensure merge status can be retrieved.
+ */
+TEST_P(BootHidlTest, GetSnapshotMergeStatus) {
+ auto values = ValidMergeStatusValues();
+ auto status = (MergeStatus)boot->getSnapshotMergeStatus();
+ EXPECT_THAT(values, Contains(status));
+}
+
+/**
+ * Ensure merge status can be set to arbitrary value.
+ */
+TEST_P(BootHidlTest, SetSnapshotMergeStatus) {
+ for (const auto value : ValidMergeStatusValues()) {
+ EXPECT_TRUE(boot->setSnapshotMergeStatus(value).withDefault(false));
+ auto status = boot->getSnapshotMergeStatus();
+ EXPECT_EQ(status, value);
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ , BootHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IBootControl::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/camera/device/3.4/default/ExternalCameraDevice.cpp b/camera/device/3.4/default/ExternalCameraDevice.cpp
index 9a2fddf..f518a15 100644
--- a/camera/device/3.4/default/ExternalCameraDevice.cpp
+++ b/camera/device/3.4/default/ExternalCameraDevice.cpp
@@ -754,11 +754,11 @@
int fd, double fpsUpperBound, SupportedV4L2Format* format) {
format->frameRates.clear();
- v4l2_frmivalenum frameInterval {
- .pixel_format = format->fourcc,
- .width = format->width,
- .height = format->height,
- .index = 0
+ v4l2_frmivalenum frameInterval{
+ .index = 0,
+ .pixel_format = format->fourcc,
+ .width = format->width,
+ .height = format->height,
};
for (frameInterval.index = 0;
diff --git a/camera/device/3.4/default/ExternalCameraDeviceSession.cpp b/camera/device/3.4/default/ExternalCameraDeviceSession.cpp
index dc5579a..9ff0d74 100644
--- a/camera/device/3.4/default/ExternalCameraDeviceSession.cpp
+++ b/camera/device/3.4/default/ExternalCameraDeviceSession.cpp
@@ -2412,9 +2412,7 @@
mV4L2BufferCount = req_buffers.count;
for (uint32_t i = 0; i < req_buffers.count; i++) {
v4l2_buffer buffer = {
- .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
- .index = i,
- .memory = V4L2_MEMORY_MMAP};
+ .index = i, .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .memory = V4L2_MEMORY_MMAP};
if (TEMP_FAILURE_RETRY(ioctl(mV4l2Fd.get(), VIDIOC_QUERYBUF, &buffer)) < 0) {
ALOGE("%s: QUERYBUF %d failed: %s", __FUNCTION__, i, strerror(errno));
diff --git a/cas/1.0/vts/functional/Android.bp b/cas/1.0/vts/functional/Android.bp
index 622baa5..ab39c0e 100644
--- a/cas/1.0/vts/functional/Android.bp
+++ b/cas/1.0/vts/functional/Android.bp
@@ -29,6 +29,6 @@
shared_libs: [
"libbinder",
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/cas/1.0/vts/functional/VtsHalCasV1_0TargetTest.cpp b/cas/1.0/vts/functional/VtsHalCasV1_0TargetTest.cpp
index 14b8bbd..0f16de5 100644
--- a/cas/1.0/vts/functional/VtsHalCasV1_0TargetTest.cpp
+++ b/cas/1.0/vts/functional/VtsHalCasV1_0TargetTest.cpp
@@ -16,8 +16,6 @@
#define LOG_TAG "mediacas_hidl_hal_test"
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
#include <android-base/logging.h>
#include <android/hardware/cas/1.0/ICas.h>
#include <android/hardware/cas/1.0/ICasListener.h>
@@ -27,8 +25,11 @@
#include <android/hardware/cas/native/1.0/IDescrambler.h>
#include <android/hardware/cas/native/1.0/types.h>
#include <binder/MemoryDealer.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
#include <hidl/HidlSupport.h>
#include <hidl/HidlTransportSupport.h>
+#include <hidl/ServiceManagement.h>
#include <hidl/Status.h>
#include <hidlmemory/FrameworkUtils.h>
#include <utils/Condition.h>
@@ -208,29 +209,16 @@
EXPECT_TRUE(mEventData == eventData);
}
-// Test environment for Cas HIDL HAL.
-class CasHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static CasHidlEnvironment* Instance() {
- static CasHidlEnvironment* instance = new CasHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<IMediaCasService>(); }
-};
-
-class MediaCasHidlTest : public ::testing::VtsHalHidlTargetTestBase {
- public:
+class MediaCasHidlTest : public testing::TestWithParam<std::string> {
+ public:
virtual void SetUp() override {
- mService = ::testing::VtsHalHidlTargetTestBase::getService<IMediaCasService>(
- CasHidlEnvironment::Instance()->getServiceName<IMediaCasService>());
+ mService = IMediaCasService::getService(GetParam());
ASSERT_NE(mService, nullptr);
}
- sp<IMediaCasService> mService;
+ sp<IMediaCasService> mService = nullptr;
- protected:
+ protected:
static void description(const std::string& description) {
RecordProperty("description", description);
}
@@ -325,7 +313,7 @@
return ::testing::AssertionFailure();
}
- uint8_t* ipBuffer = static_cast<uint8_t*>(static_cast<void*>(mem->pointer()));
+ uint8_t* ipBuffer = static_cast<uint8_t*>(static_cast<void*>(mem->unsecurePointer()));
memcpy(ipBuffer, kInBinaryBuffer, sizeof(kInBinaryBuffer));
// hidlMemory is not to be passed out of scope!
@@ -419,7 +407,7 @@
return ::testing::AssertionResult(returnVoid.isOk());
}
-TEST_F(MediaCasHidlTest, EnumeratePlugins) {
+TEST_P(MediaCasHidlTest, EnumeratePlugins) {
description("Test enumerate plugins");
hidl_vec<HidlCasPluginDescriptor> descriptors;
EXPECT_TRUE(mService
@@ -440,7 +428,7 @@
}
}
-TEST_F(MediaCasHidlTest, TestInvalidSystemIdFails) {
+TEST_P(MediaCasHidlTest, TestInvalidSystemIdFails) {
description("Test failure for invalid system ID");
sp<MediaCasListener> casListener = new MediaCasListener();
@@ -458,7 +446,7 @@
EXPECT_EQ(descramblerBase, nullptr);
}
-TEST_F(MediaCasHidlTest, TestClearKeyPluginInstalled) {
+TEST_P(MediaCasHidlTest, TestClearKeyPluginInstalled) {
description("Test if ClearKey plugin is installed");
hidl_vec<HidlCasPluginDescriptor> descriptors;
EXPECT_TRUE(mService
@@ -480,7 +468,7 @@
ASSERT_TRUE(false) << "ClearKey plugin not installed";
}
-TEST_F(MediaCasHidlTest, TestClearKeyApis) {
+TEST_P(MediaCasHidlTest, TestClearKeyApis) {
description("Test that valid call sequences succeed");
ASSERT_TRUE(createCasPlugin(CLEAR_KEY_SYSTEM_ID));
@@ -568,7 +556,7 @@
EXPECT_EQ(Status::OK, descrambleStatus);
ASSERT_NE(nullptr, dataMemory.get());
- uint8_t* opBuffer = static_cast<uint8_t*>(static_cast<void*>(dataMemory->pointer()));
+ uint8_t* opBuffer = static_cast<uint8_t*>(static_cast<void*>(dataMemory->unsecurePointer()));
int compareResult =
memcmp(static_cast<const void*>(opBuffer), static_cast<const void*>(kOutRefBinaryBuffer),
@@ -584,7 +572,7 @@
EXPECT_EQ(Status::OK, returnStatus);
}
-TEST_F(MediaCasHidlTest, TestClearKeySessionClosedAfterRelease) {
+TEST_P(MediaCasHidlTest, TestClearKeySessionClosedAfterRelease) {
description("Test that all sessions are closed after a MediaCas object is released");
ASSERT_TRUE(createCasPlugin(CLEAR_KEY_SYSTEM_ID));
@@ -611,7 +599,7 @@
EXPECT_EQ(Status::ERROR_CAS_SESSION_NOT_OPENED, returnStatus);
}
-TEST_F(MediaCasHidlTest, TestClearKeyErrors) {
+TEST_P(MediaCasHidlTest, TestClearKeyErrors) {
description("Test that invalid call sequences fail with expected error codes");
ASSERT_TRUE(createCasPlugin(CLEAR_KEY_SYSTEM_ID));
@@ -700,7 +688,7 @@
EXPECT_FALSE(mDescramblerBase->requiresSecureDecoderComponent("bad"));
}
-TEST_F(MediaCasHidlTest, TestClearKeyOobFails) {
+TEST_P(MediaCasHidlTest, TestClearKeyOobFails) {
description("Test that oob descramble request fails with expected error");
ASSERT_TRUE(createCasPlugin(CLEAR_KEY_SYSTEM_ID));
@@ -849,11 +837,7 @@
} // anonymous namespace
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(CasHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- CasHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, MediaCasHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IMediaCasService::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/cas/1.1/vts/functional/Android.bp b/cas/1.1/vts/functional/Android.bp
index 8afd19a..9e8eb52 100644
--- a/cas/1.1/vts/functional/Android.bp
+++ b/cas/1.1/vts/functional/Android.bp
@@ -30,6 +30,6 @@
shared_libs: [
"libbinder",
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/cas/1.1/vts/functional/VtsHalCasV1_1TargetTest.cpp b/cas/1.1/vts/functional/VtsHalCasV1_1TargetTest.cpp
index 88f1fb0..7e5a61a 100644
--- a/cas/1.1/vts/functional/VtsHalCasV1_1TargetTest.cpp
+++ b/cas/1.1/vts/functional/VtsHalCasV1_1TargetTest.cpp
@@ -27,8 +27,11 @@
#include <android/hardware/cas/native/1.0/IDescrambler.h>
#include <android/hardware/cas/native/1.0/types.h>
#include <binder/MemoryDealer.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
#include <hidl/HidlSupport.h>
#include <hidl/HidlTransportSupport.h>
+#include <hidl/ServiceManagement.h>
#include <hidl/Status.h>
#include <hidlmemory/FrameworkUtils.h>
#include <utils/Condition.h>
@@ -251,27 +254,14 @@
EXPECT_TRUE(mEventData == eventData);
}
-// Test environment for Cas HIDL HAL.
-class CasHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static CasHidlEnvironment* Instance() {
- static CasHidlEnvironment* instance = new CasHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<IMediaCasService>(); }
-};
-
-class MediaCasHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+class MediaCasHidlTest : public testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
- mService = ::testing::VtsHalHidlTargetTestBase::getService<IMediaCasService>(
- CasHidlEnvironment::Instance()->getServiceName<IMediaCasService>());
+ mService = IMediaCasService::getService(GetParam());
ASSERT_NE(mService, nullptr);
}
- sp<IMediaCasService> mService;
+ sp<IMediaCasService> mService = nullptr;
protected:
static void description(const std::string& description) {
@@ -366,7 +356,7 @@
return ::testing::AssertionFailure();
}
- uint8_t* ipBuffer = static_cast<uint8_t*>(static_cast<void*>(mem->pointer()));
+ uint8_t* ipBuffer = static_cast<uint8_t*>(static_cast<void*>(mem->unsecurePointer()));
memcpy(ipBuffer, kInBinaryBuffer, sizeof(kInBinaryBuffer));
// hidlMemory is not to be passed out of scope!
@@ -453,7 +443,7 @@
return ::testing::AssertionResult(returnVoid.isOk());
}
-TEST_F(MediaCasHidlTest, TestClearKeyApisWithSession) {
+TEST_P(MediaCasHidlTest, TestClearKeyApisWithSession) {
description("Test that valid call sequences with SessionEvent send and receive");
ASSERT_TRUE(createCasPlugin(CLEAR_KEY_SYSTEM_ID));
@@ -543,7 +533,7 @@
EXPECT_EQ(Status::OK, descrambleStatus);
ASSERT_NE(nullptr, dataMemory.get());
- uint8_t* opBuffer = static_cast<uint8_t*>(static_cast<void*>(dataMemory->pointer()));
+ uint8_t* opBuffer = static_cast<uint8_t*>(static_cast<void*>(dataMemory->unsecurePointer()));
int compareResult =
memcmp(static_cast<const void*>(opBuffer),
@@ -561,11 +551,7 @@
} // anonymous namespace
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(CasHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- CasHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- return status;
-}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, MediaCasHidlTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IMediaCasService::descriptor)),
+ android::hardware::PrintInstanceNameToString);
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index d708b57..34cbe26 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -91,7 +91,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.boot</name>
- <version>1.0</version>
+ <version>1.1</version>
<interface>
<name>IBootControl</name>
<instance>default</instance>
@@ -206,7 +206,7 @@
</hal>
<hal format="hidl" optional="false">
<name>android.hardware.graphics.composer</name>
- <version>2.1-3</version>
+ <version>2.1-4</version>
<interface>
<name>IComposer</name>
<instance>default</instance>
@@ -309,7 +309,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.neuralnetworks</name>
- <version>1.0-2</version>
+ <version>1.0-3</version>
<interface>
<name>IDevice</name>
<regex-instance>.*</regex-instance>
@@ -469,7 +469,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.vibrator</name>
- <version>1.0-3</version>
+ <version>1.0-4</version>
<interface>
<name>IVibrator</name>
<instance>default</instance>
@@ -493,7 +493,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.wifi</name>
- <version>1.0-3</version>
+ <version>1.0-4</version>
<interface>
<name>IWifi</name>
<instance>default</instance>
@@ -509,7 +509,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.wifi.supplicant</name>
- <version>1.0-2</version>
+ <version>1.0-3</version>
<interface>
<name>ISupplicant</name>
<instance>default</instance>
diff --git a/confirmationui/1.0/vts/functional/Android.bp b/confirmationui/1.0/vts/functional/Android.bp
index d19d702..fd088cd 100644
--- a/confirmationui/1.0/vts/functional/Android.bp
+++ b/confirmationui/1.0/vts/functional/Android.bp
@@ -23,7 +23,7 @@
static_libs: [
"android.hardware.confirmationui@1.0",
"android.hardware.keymaster@4.0",
- "libcrypto",
+ "libcrypto_static",
"libcn-cbor",
"android.hardware.confirmationui-support-lib",
],
diff --git a/contexthub/1.0/default/OWNERS b/contexthub/1.0/default/OWNERS
index 5373073..90c2330 100644
--- a/contexthub/1.0/default/OWNERS
+++ b/contexthub/1.0/default/OWNERS
@@ -1,4 +1,3 @@
-aarossig@google.com
arthuri@google.com
bduddie@google.com
-bstack@google.com
+stange@google.com
diff --git a/contexthub/1.0/vts/functional/OWNERS b/contexthub/1.0/vts/functional/OWNERS
index ee01441..045cc4e 100644
--- a/contexthub/1.0/vts/functional/OWNERS
+++ b/contexthub/1.0/vts/functional/OWNERS
@@ -1,8 +1,7 @@
#Context Hub team
-aarossig@google.com
arthuri@google.com
bduddie@google.com
-bstack@google.com
+stange@google.com
#VTS team
yim@google.com
diff --git a/current.txt b/current.txt
index 44eebf8..87649d2 100644
--- a/current.txt
+++ b/current.txt
@@ -575,7 +575,26 @@
2410dd02d67786a732d36e80b0f8ccf55086604ef37f9838e2013ff2c571e404 android.hardware.camera.device@3.5::types
b69a7615c508acf5c5201efd1bfa3262167874fc3594e2db5a3ff93addd8ac75 android.hardware.keymaster@4.0::IKeymasterDevice
eb2fa0c883c2185d514be0b84c179b283753ef0c1b77b45b4f359bd23bba8b75 android.hardware.neuralnetworks@1.0::IPreparedModel
+f1109cbb10297b7429a11fab42afa912710b303c9bf20bd5cdb8bd57b9c84186 android.hardware.neuralnetworks@1.0::types
+9d8ee57c490ffeaa28f702eaea8d198cb510e4bbfb99e6cb5f63e73341057c7c android.hardware.neuralnetworks@1.1::types
fb382e986c10b8fbb797a8546e8f9ea6d1107bfe6f3fb7e57f6bbbf1f807a906 android.hardware.neuralnetworks@1.2::IDevice
-6c5081dd131eeb7eb02efece2187cd4d7d554197800bb520c92ff874cc238fa6 android.hardware.neuralnetworks@1.2::IPreparedModel
+40e71cd693de5b832325c5d8f081f2ff20a7ba2b89d401cee5b4b3eb0e241681 android.hardware.neuralnetworks@1.2::IPreparedModel
+71c0f7127335e5b74d1615d5e7f129831b43ffbae5318ad0924d7d8d8910a859 android.hardware.neuralnetworks@1.2::types
+a785a57447a81e9c130eef6904c3a5c256076c6a04588c40620ebd6fa2660d77 android.hardware.radio@1.2::types
1a6e2bd289f22931c526b21916910f1d4c436b7acb9556e4243de4ce8e6cc2e4 android.hardware.soundtrigger@2.0::ISoundTriggerHwCallback
fd65298e1e09e0e3c781ab18305920d757dbe55a3b459ce17814ec5cf6dfee99 android.hardware.wifi@1.0::IWifiP2pIface
+
+# HALs released in Android R
+07d0a252b2d8fa35887908a996ba395cf392968395fc30afab791f46e0c22a52 android.hardware.boot@1.1::IBootControl
+74049a402be913963edfdd80828a53736570e9d8124a1bf18166b6ed46a6b0ab android.hardware.boot@1.1::types
+34515afa2bb792d3c6d8495a5f5d907d179c8507ca5e55c10050d02ae1d516ef android.hardware.neuralnetworks@1.3::IDevice
+b74fe72cfe438f50e772e6a307657ff449d5bde83c15dd1f140ff2edbe73499c android.hardware.neuralnetworks@1.3::types
+04395b26be33db17747c3d3b0e8066d323f891ff4f9f3b3ddb490b2f3f844a18 android.hardware.wifi@1.4::IWifi
+270f0eb670dfd9bc5cd718e09711f2534fa8425f54d06c1a46523ca156b509e2 android.hardware.wifi.supplicant@1.3::ISupplicant
+dd4b7cfbb6e1c6ff011c33920762ad89dd02240c63a4d3a3d5037f154eae3e3b android.hardware.wifi.supplicant@1.3::ISupplicantStaIface
+619fc9839ec6e369cfa9b28e3e9412e6885720ff8f9b5750c1b6ffb905120391 android.hardware.wifi.supplicant@1.3::ISupplicantStaIfaceCallback
+6fe09b18e913608579638594788198ec45bb2369e567d7df661db46c4f0e5f08 android.hardware.wifi.supplicant@1.3::ISupplicantStaNetwork
+91931b05bd70ea6bdffbe075086183f803379571788564e28854207620eb75cf android.hardware.wifi.supplicant@1.3::types
+544049dcda3f943ad67d83d5277f06681a3782982a9af5a78b5d4e8d295d061a android.hardware.vibrator@1.4::IVibrator
+5e1c12efbbba89c9143d10b1b90eceff8bc79aa079f5106215b528e104fef101 android.hardware.vibrator@1.4::IVibratorCallback
+033eae03c09ebc75e82db37bc39995dfaa9086745577b44d9e14e9ccb48bd8cc android.hardware.vibrator@1.4::types
diff --git a/drm/1.0/vts/functional/Android.bp b/drm/1.0/vts/functional/Android.bp
index d6ebfdd..61d4d58 100644
--- a/drm/1.0/vts/functional/Android.bp
+++ b/drm/1.0/vts/functional/Android.bp
@@ -30,7 +30,7 @@
"libhidlmemory",
"libnativehelper",
"libssl",
- "libcrypto",
+ "libcrypto_static",
],
test_suites: ["general-tests"],
}
diff --git a/drm/1.2/vts/functional/Android.bp b/drm/1.2/vts/functional/Android.bp
index 6b4a4c0..95883bf 100644
--- a/drm/1.2/vts/functional/Android.bp
+++ b/drm/1.2/vts/functional/Android.bp
@@ -34,7 +34,7 @@
"libhidlmemory",
"libnativehelper",
"libssl",
- "libcrypto",
+ "libcrypto_static",
],
test_suites: ["general-tests"],
}
diff --git a/drm/TEST_MAPPING b/drm/TEST_MAPPING
new file mode 100644
index 0000000..cff6819
--- /dev/null
+++ b/drm/TEST_MAPPING
@@ -0,0 +1,8 @@
+{
+ "imports": [
+ // gts and cts filters
+ {
+ "path": "frameworks/av/drm/libmediadrm"
+ }
+ ]
+}
diff --git a/dumpstate/1.0/default/DumpstateDevice.cpp b/dumpstate/1.0/default/DumpstateDevice.cpp
index 25d92b0..c57bf43 100644
--- a/dumpstate/1.0/default/DumpstateDevice.cpp
+++ b/dumpstate/1.0/default/DumpstateDevice.cpp
@@ -37,11 +37,6 @@
// NOTE: this is just an example on how to use the DumpstateUtil.h functions to implement
// this interface.
- // Exit when dump is completed since this is a lazy HAL.
- addPostCommandTask([]() {
- exit(0);
- });
-
if (handle == nullptr || handle->numFds < 1) {
ALOGE("no FDs\n");
return Void();
diff --git a/dumpstate/1.0/default/service.cpp b/dumpstate/1.0/default/service.cpp
index 4f276b7..76c72b5 100644
--- a/dumpstate/1.0/default/service.cpp
+++ b/dumpstate/1.0/default/service.cpp
@@ -15,22 +15,26 @@
*/
#define LOG_TAG "android.hardware.dumpstate@1.0-service"
+#include <hidl/HidlLazyUtils.h>
#include <hidl/HidlSupport.h>
#include <hidl/HidlTransportSupport.h>
#include "DumpstateDevice.h"
-using ::android::hardware::configureRpcThreadpool;
-using ::android::hardware::dumpstate::V1_0::IDumpstateDevice;
-using ::android::hardware::dumpstate::V1_0::implementation::DumpstateDevice;
-using ::android::hardware::joinRpcThreadpool;
using ::android::OK;
using ::android::sp;
+using ::android::hardware::configureRpcThreadpool;
+using ::android::hardware::joinRpcThreadpool;
+using ::android::hardware::LazyServiceRegistrar;
+using ::android::hardware::dumpstate::V1_0::IDumpstateDevice;
+using ::android::hardware::dumpstate::V1_0::implementation::DumpstateDevice;
int main(int /* argc */, char* /* argv */ []) {
sp<IDumpstateDevice> dumpstate = new DumpstateDevice;
configureRpcThreadpool(1, true /* will join */);
- if (dumpstate->registerAsService() != OK) {
+
+ auto registrar = LazyServiceRegistrar::getInstance();
+ if (registrar.registerService(dumpstate) != OK) {
ALOGE("Could not register service.");
return 1;
}
diff --git a/gnss/1.0/default/Gnss.cpp b/gnss/1.0/default/Gnss.cpp
index 32c131c..7d1cacf 100644
--- a/gnss/1.0/default/Gnss.cpp
+++ b/gnss/1.0/default/Gnss.cpp
@@ -128,20 +128,20 @@
for (size_t i = 0; i < svStatus.numSvs; i++) {
auto svInfo = status->gnss_sv_list[i];
IGnssCallback::GnssSvInfo gnssSvInfo = {
- .svid = svInfo.svid,
- .constellation = static_cast<
- android::hardware::gnss::V1_0::GnssConstellationType>(
- svInfo.constellation),
- .cN0Dbhz = svInfo.c_n0_dbhz,
- .elevationDegrees = svInfo.elevation,
- .azimuthDegrees = svInfo.azimuth,
- // Older chipsets do not provide carrier frequency, hence
- // HAS_CARRIER_FREQUENCY flag and the carrierFrequencyHz fields
- // are not set. So we are resetting both fields here.
- .svFlag = static_cast<uint8_t>(
- svInfo.flags &= ~(static_cast<uint8_t>(
- IGnssCallback::GnssSvFlags::HAS_CARRIER_FREQUENCY))),
- .carrierFrequencyHz = 0};
+ .svid = svInfo.svid,
+ .constellation = static_cast<android::hardware::gnss::V1_0::GnssConstellationType>(
+ svInfo.constellation),
+ .cN0Dbhz = svInfo.c_n0_dbhz,
+ .elevationDegrees = svInfo.elevation,
+ .azimuthDegrees = svInfo.azimuth,
+ .carrierFrequencyHz = 0,
+ // Older chipsets do not provide carrier frequency, hence
+ // HAS_CARRIER_FREQUENCY flag and the carrierFrequencyHz fields
+ // are not set. So we are resetting both fields here.
+ .svFlag = static_cast<uint8_t>(
+ svInfo.flags &=
+ ~(static_cast<uint8_t>(IGnssCallback::GnssSvFlags::HAS_CARRIER_FREQUENCY))),
+ };
svStatus.gnssSvList[i] = gnssSvInfo;
}
diff --git a/gnss/1.0/vts/functional/Android.bp b/gnss/1.0/vts/functional/Android.bp
index 505cb41..d73b32e 100644
--- a/gnss/1.0/vts/functional/Android.bp
+++ b/gnss/1.0/vts/functional/Android.bp
@@ -19,5 +19,5 @@
defaults: ["VtsHalTargetTestDefaults"],
srcs: ["VtsHalGnssV1_0TargetTest.cpp"],
static_libs: ["android.hardware.gnss@1.0"],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/gnss/1.0/vts/functional/VtsHalGnssV1_0TargetTest.cpp b/gnss/1.0/vts/functional/VtsHalGnssV1_0TargetTest.cpp
index c26f60a..1a80ecf 100644
--- a/gnss/1.0/vts/functional/VtsHalGnssV1_0TargetTest.cpp
+++ b/gnss/1.0/vts/functional/VtsHalGnssV1_0TargetTest.cpp
@@ -16,11 +16,11 @@
#define LOG_TAG "VtsHalGnssV1_0TargetTest"
#include <android/hardware/gnss/1.0/IGnss.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include <log/log.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-
#include <chrono>
#include <condition_variable>
#include <mutex>
@@ -42,23 +42,8 @@
bool sAgpsIsPresent = false; // if SUPL or XTRA assistance available
bool sSignalIsWeak = false; // if GNSS signals are weak (e.g. light indoor)
-// Test environment for GNSS HIDL HAL.
-class GnssHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static GnssHidlEnvironment* Instance() {
- static GnssHidlEnvironment* instance = new GnssHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<IGnss>(); }
-
- private:
- GnssHidlEnvironment() {}
-};
-
// The main test class for GNSS HAL.
-class GnssHalTest : public ::testing::VtsHalHidlTargetTestBase {
+class GnssHalTest : public testing::TestWithParam<std::string> {
public:
virtual void SetUp() override {
// Clean between tests
@@ -67,8 +52,7 @@
info_called_count_ = 0;
notify_count_ = 0;
- gnss_hal_ = ::testing::VtsHalHidlTargetTestBase::getService<IGnss>(
- GnssHidlEnvironment::Instance()->getServiceName<IGnss>());
+ gnss_hal_ = IGnss::getService(GetParam());
ASSERT_NE(gnss_hal_, nullptr);
gnss_cb_ = new GnssCallback(*this);
@@ -344,14 +328,14 @@
* Since this is just the basic operation of SetUp() and TearDown(),
* the function definition is intentionally empty
*/
-TEST_F(GnssHalTest, SetCallbackCapabilitiesCleanup) {}
+TEST_P(GnssHalTest, SetCallbackCapabilitiesCleanup) {}
/*
* GetLocation:
 * Turns on location, waits 45 seconds for at least 5 locations,
 * Turns on location, waits 45 seconds for at least 5 locations,
* and checks them for reasonable validity.
*/
-TEST_F(GnssHalTest, GetLocation) {
+TEST_P(GnssHalTest, GetLocation) {
#define MIN_INTERVAL_MSEC 500
#define PREFERRED_ACCURACY 0 // Ideally perfect (matches GnssLocationProvider)
#define PREFERRED_TIME_MSEC 0 // Ideally immediate
@@ -391,7 +375,7 @@
* InjectDelete:
* Ensures that calls to inject and/or delete information state are handled.
*/
-TEST_F(GnssHalTest, InjectDelete) {
+TEST_P(GnssHalTest, InjectDelete) {
// confidently, well north of Alaska
auto result = gnss_hal_->injectLocation(80.0, -170.0, 1000.0);
@@ -424,7 +408,7 @@
* null or actual extension, no crash.
* Confirms year-based required extensions (Measurement & Debug) are present
*/
-TEST_F(GnssHalTest, GetAllExtensions) {
+TEST_P(GnssHalTest, GetAllExtensions) {
// Basic call-is-handled checks
auto gnssXtra = gnss_hal_->getExtensionXtra();
ASSERT_TRUE(gnssXtra.isOk());
@@ -470,7 +454,7 @@
* MeasurementCapabilities:
* Verifies that modern hardware supports measurement capabilities.
*/
-TEST_F(GnssHalTest, MeasurementCapabilites) {
+TEST_P(GnssHalTest, MeasurementCapabilites) {
if (info_called_count_ > 0 && last_info_.yearOfHw >= 2016) {
EXPECT_TRUE(last_capabilities_ & IGnssCallback::Capabilities::MEASUREMENTS);
}
@@ -480,16 +464,19 @@
* SchedulingCapabilities:
* Verifies that 2018+ hardware supports Scheduling capabilities.
*/
-TEST_F(GnssHalTest, SchedulingCapabilities) {
+TEST_P(GnssHalTest, SchedulingCapabilities) {
if (info_called_count_ > 0 && last_info_.yearOfHw >= 2018) {
EXPECT_TRUE(last_capabilities_ & IGnssCallback::Capabilities::SCHEDULING);
}
}
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, GnssHalTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IGnss::descriptor)),
+ android::hardware::PrintInstanceNameToString);
+
int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(GnssHidlEnvironment::Instance());
::testing::InitGoogleTest(&argc, argv);
- GnssHidlEnvironment::Instance()->init(&argc, argv);
/*
* These arguments not used by automated VTS testing.
* Only for use in manual testing, when wanting to run
@@ -502,7 +489,6 @@
sSignalIsWeak = true;
}
}
- int status = RUN_ALL_TESTS();
- ALOGI("Test result = %d", status);
- return status;
-}
+
+ return RUN_ALL_TESTS();
+}
\ No newline at end of file
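The TEST_F-to-TEST_P conversion above is the standard recipe for running a VTS suite once per registered HAL instance. Condensed into one sketch (main() and the real test bodies omitted), using only calls that already appear in this hunk:

    #include <android/hardware/gnss/1.0/IGnss.h>
    #include <gtest/gtest.h>
    #include <hidl/GtestPrinter.h>
    #include <hidl/ServiceManagement.h>

    using ::android::sp;
    using ::android::hardware::gnss::V1_0::IGnss;

    class GnssHalSketch : public testing::TestWithParam<std::string> {
      protected:
        void SetUp() override {
            // GetParam() is the instance name ("default", ...) supplied by the
            // INSTANTIATE_TEST_SUITE_P call below.
            gnss_hal_ = IGnss::getService(GetParam());
            ASSERT_NE(gnss_hal_, nullptr);
        }
        sp<IGnss> gnss_hal_;
    };

    TEST_P(GnssHalSketch, SetUpTearDownOnly) {}

    // One suite instantiation per HAL instance registered on the device.
    INSTANTIATE_TEST_SUITE_P(
            PerInstance, GnssHalSketch,
            testing::ValuesIn(android::hardware::getAllHalInstanceNames(IGnss::descriptor)),
            android::hardware::PrintInstanceNameToString);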
diff --git a/gnss/1.1/vts/functional/Android.bp b/gnss/1.1/vts/functional/Android.bp
index cc34290..bdd02d2 100644
--- a/gnss/1.1/vts/functional/Android.bp
+++ b/gnss/1.1/vts/functional/Android.bp
@@ -30,5 +30,5 @@
shared_libs: [
"android.hardware.gnss.measurement_corrections@1.0",
],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/gnss/1.1/vts/functional/VtsHalGnssV1_1TargetTest.cpp b/gnss/1.1/vts/functional/VtsHalGnssV1_1TargetTest.cpp
index ca9eef4..4a0a7f9 100644
--- a/gnss/1.1/vts/functional/VtsHalGnssV1_1TargetTest.cpp
+++ b/gnss/1.1/vts/functional/VtsHalGnssV1_1TargetTest.cpp
@@ -15,15 +15,15 @@
*/
#define LOG_TAG "VtsHalGnssV1_1TargetTest"
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "gnss_hal_test.h"
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(GnssHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- GnssHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- ALOGI("Test result = %d", status);
- return status;
-}
+using android::hardware::gnss::V1_1::IGnss;
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, GnssHalTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IGnss::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/gnss/1.1/vts/functional/gnss_hal_test.cpp b/gnss/1.1/vts/functional/gnss_hal_test.cpp
index 61a2ce4..2c8a7b1 100644
--- a/gnss/1.1/vts/functional/gnss_hal_test.cpp
+++ b/gnss/1.1/vts/functional/gnss_hal_test.cpp
@@ -17,6 +17,8 @@
#define LOG_TAG "GnssHalTest"
#include <android/hidl/manager/1.2/IServiceManager.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
#include <hidl/ServiceManagement.h>
#include <gnss_hal_test.h>
@@ -29,8 +31,7 @@
using ::android::hardware::gnss::common::Utils;
void GnssHalTest::SetUp() {
- gnss_hal_ = ::testing::VtsHalHidlTargetTestBase::getService<IGnss>(
- GnssHidlEnvironment::Instance()->getServiceName<IGnss>());
+ gnss_hal_ = IGnss::getService(GetParam());
ASSERT_NE(gnss_hal_, nullptr);
SetUpGnssCallback();
diff --git a/gnss/1.1/vts/functional/gnss_hal_test.h b/gnss/1.1/vts/functional/gnss_hal_test.h
index e4325bf..169cd62 100644
--- a/gnss/1.1/vts/functional/gnss_hal_test.h
+++ b/gnss/1.1/vts/functional/gnss_hal_test.h
@@ -19,8 +19,7 @@
#include <android/hardware/gnss/1.1/IGnss.h>
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
+#include <gtest/gtest.h>
#include "GnssCallbackEventQueue.h"
using android::hardware::Return;
@@ -37,24 +36,9 @@
#define TIMEOUT_SEC 2 // for basic commands/responses
-// Test environment for GNSS HIDL HAL.
-class GnssHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static GnssHidlEnvironment* Instance() {
- static GnssHidlEnvironment* instance = new GnssHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<IGnss>(); }
-
- private:
- GnssHidlEnvironment() {}
-};
-
// The main test class for GNSS HAL.
-class GnssHalTest : public ::testing::VtsHalHidlTargetTestBase {
- public:
+class GnssHalTest : public testing::TestWithParam<std::string> {
+ public:
virtual void SetUp() override;
virtual void TearDown() override;
diff --git a/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp b/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp
index 3294bcd..79da84a 100644
--- a/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp
+++ b/gnss/1.1/vts/functional/gnss_hal_test_cases.cpp
@@ -18,9 +18,8 @@
#include <gnss_hal_test.h>
-#include <VtsHalHidlTargetTestBase.h>
-
#include <android/hardware/gnss/1.1/IGnssConfiguration.h>
+#include <gtest/gtest.h>
using android::hardware::hidl_vec;
@@ -39,13 +38,13 @@
*
* Empty test fixture to verify basic Setup & Teardown
*/
-TEST_F(GnssHalTest, SetupTeardownCreateCleanup) {}
+TEST_P(GnssHalTest, SetupTeardownCreateCleanup) {}
/*
* TestGnssMeasurementCallback:
* Gets the GnssMeasurementExtension and verify that it returns an actual extension.
*/
-TEST_F(GnssHalTest, TestGnssMeasurementCallback) {
+TEST_P(GnssHalTest, TestGnssMeasurementCallback) {
auto gnssMeasurement_1_1 = gnss_hal_->getExtensionGnssMeasurement_1_1();
ASSERT_TRUE(gnssMeasurement_1_1.isOk());
auto gnssMeasurement_1_0 = gnss_hal_->getExtensionGnssMeasurement();
@@ -65,7 +64,7 @@
 * NO_LOCATION_PERIOD_SEC and verify that no location is received. Also perform validity checks on
* each received location.
*/
-TEST_F(GnssHalTest, GetLocationLowPower) {
+TEST_P(GnssHalTest, GetLocationLowPower) {
if (!IsGnssHalVersion_1_1()) {
ALOGI("Test GetLocationLowPower skipped. GNSS HAL version is greater than 1.1.");
return;
@@ -97,7 +96,6 @@
gnss_cb_->location_cbq_.retrieve(gnss_cb_->last_location_, kNoLocationPeriodSec);
const int location_called_count = gnss_cb_->location_cbq_.calledCount();
-
// Tolerate (ignore) one extra location right after the first one
// to handle startup edge case scheduling limitations in some implementations
if ((i == 1) && (location_called_count == 2)) {
@@ -132,7 +130,8 @@
*/
IGnssConfiguration::BlacklistedSource FindStrongFrequentNonGpsSource(
- const list<IGnssCallback::GnssSvStatus> list_gnss_sv_status, const int min_observations) {
+ const std::list<IGnssCallback::GnssSvStatus> list_gnss_sv_status,
+ const int min_observations) {
struct ComparableBlacklistedSource {
IGnssConfiguration::BlacklistedSource id;
@@ -218,7 +217,7 @@
* 5b) Retry a few times, in case GNSS search strategy takes a while to reacquire even the
* formerly strongest satellite
*/
-TEST_F(GnssHalTest, BlacklistIndividualSatellites) {
+TEST_P(GnssHalTest, BlacklistIndividualSatellites) {
if (!IsGnssHalVersion_1_1()) {
ALOGI("Test BlacklistIndividualSatellites skipped. GNSS HAL version is greater than 1.1.");
return;
@@ -244,7 +243,7 @@
*/
const int kGnssSvStatusTimeout = 2;
- list<IGnssCallback::GnssSvStatus> sv_status_list;
+ std::list<IGnssCallback::GnssSvStatus> sv_status_list;
int count = gnss_cb_->sv_status_cbq_.retrieve(sv_status_list, sv_status_cbq_size,
kGnssSvStatusTimeout);
ASSERT_EQ(count, sv_status_cbq_size);
@@ -362,7 +361,7 @@
* GnssStatus does not use any constellation but GPS.
* 4a & b) Clean up by turning off location, and send in empty blacklist.
*/
-TEST_F(GnssHalTest, BlacklistConstellation) {
+TEST_P(GnssHalTest, BlacklistConstellation) {
if (!IsGnssHalVersion_1_1()) {
ALOGI("Test BlacklistConstellation skipped. GNSS HAL version is greater than 1.1.");
return;
@@ -457,7 +456,7 @@
*
* Ensure successfully injecting a location.
*/
-TEST_F(GnssHalTest, InjectBestLocation) {
+TEST_P(GnssHalTest, InjectBestLocation) {
StartAndCheckLocations(1);
GnssLocation gnssLocation = gnss_cb_->last_location_;
CheckLocation(gnssLocation, true);
@@ -476,7 +475,7 @@
* GnssDebugValuesSanityTest:
* Ensures that GnssDebug values make sense.
*/
-TEST_F(GnssHalTest, GnssDebugValuesSanityTest) {
+TEST_P(GnssHalTest, GnssDebugValuesSanityTest) {
auto gnssDebug = gnss_hal_->getExtensionGnssDebug();
ASSERT_TRUE(gnssDebug.isOk());
if (gnss_cb_->info_cbq_.calledCount() > 0 && gnss_cb_->last_info_.yearOfHw >= 2017) {
diff --git a/gnss/2.0/default/GnssMeasurement.cpp b/gnss/2.0/default/GnssMeasurement.cpp
index 93de89c..1f95ff9 100644
--- a/gnss/2.0/default/GnssMeasurement.cpp
+++ b/gnss/2.0/default/GnssMeasurement.cpp
@@ -119,12 +119,13 @@
V2_0::IGnssMeasurementCallback::GnssMeasurement measurement_2_0 = {
.v1_1 = measurement_1_1,
.codeType = "C",
- .constellation = GnssConstellationType::GLONASS,
.state = GnssMeasurementState::STATE_CODE_LOCK | GnssMeasurementState::STATE_BIT_SYNC |
GnssMeasurementState::STATE_SUBFRAME_SYNC |
GnssMeasurementState::STATE_TOW_DECODED |
GnssMeasurementState::STATE_GLO_STRING_SYNC |
- GnssMeasurementState::STATE_GLO_TOD_DECODED};
+ GnssMeasurementState::STATE_GLO_TOD_DECODED,
+ .constellation = GnssConstellationType::GLONASS,
+ };
hidl_vec<IGnssMeasurementCallback::GnssMeasurement> measurements(1);
measurements[0] = measurement_2_0;
diff --git a/gnss/2.0/vts/functional/Android.bp b/gnss/2.0/vts/functional/Android.bp
index 278d87b..9aa1334 100644
--- a/gnss/2.0/vts/functional/Android.bp
+++ b/gnss/2.0/vts/functional/Android.bp
@@ -30,4 +30,5 @@
"android.hardware.gnss@2.0",
"android.hardware.gnss@common-vts-lib",
],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/gnss/2.0/vts/functional/VtsHalGnssV2_0TargetTest.cpp b/gnss/2.0/vts/functional/VtsHalGnssV2_0TargetTest.cpp
index ae36c50..2c74fa3 100644
--- a/gnss/2.0/vts/functional/VtsHalGnssV2_0TargetTest.cpp
+++ b/gnss/2.0/vts/functional/VtsHalGnssV2_0TargetTest.cpp
@@ -15,15 +15,15 @@
*/
#define LOG_TAG "VtsHalGnssV2_0TargetTest"
-#include <VtsHalHidlTargetTestBase.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
#include "gnss_hal_test.h"
-int main(int argc, char** argv) {
- ::testing::AddGlobalTestEnvironment(GnssHidlEnvironment::Instance());
- ::testing::InitGoogleTest(&argc, argv);
- GnssHidlEnvironment::Instance()->init(&argc, argv);
- int status = RUN_ALL_TESTS();
- ALOGI("Test result = %d", status);
- return status;
-}
+using android::hardware::gnss::V2_0::IGnss;
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, GnssHalTest,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IGnss::descriptor)),
+ android::hardware::PrintInstanceNameToString);
\ No newline at end of file
diff --git a/gnss/2.0/vts/functional/gnss_hal_test.cpp b/gnss/2.0/vts/functional/gnss_hal_test.cpp
index 14ae43c..8ca3f68 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test.cpp
+++ b/gnss/2.0/vts/functional/gnss_hal_test.cpp
@@ -20,12 +20,13 @@
#include <chrono>
#include "Utils.h"
+#include <gtest/gtest.h>
+
using ::android::hardware::gnss::common::Utils;
// Implementations for the main test class for GNSS HAL
void GnssHalTest::SetUp() {
- gnss_hal_ = ::testing::VtsHalHidlTargetTestBase::getService<IGnss>(
- GnssHidlEnvironment::Instance()->getServiceName<IGnss>());
+ gnss_hal_ = IGnss::getService(GetParam());
ASSERT_NE(gnss_hal_, nullptr);
SetUpGnssCallback();
diff --git a/gnss/2.0/vts/functional/gnss_hal_test.h b/gnss/2.0/vts/functional/gnss_hal_test.h
index 7d07c0f..4f7b87a 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test.h
+++ b/gnss/2.0/vts/functional/gnss_hal_test.h
@@ -17,11 +17,11 @@
#ifndef GNSS_HAL_TEST_H_
#define GNSS_HAL_TEST_H_
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
#include <android/hardware/gnss/2.0/IGnss.h>
#include "GnssCallbackEventQueue.h"
+#include <gtest/gtest.h>
+
using android::hardware::hidl_vec;
using android::hardware::Return;
using android::hardware::Void;
@@ -45,24 +45,9 @@
#define TIMEOUT_SEC 2 // for basic commands/responses
-// Test environment for GNSS HIDL HAL.
-class GnssHidlEnvironment : public ::testing::VtsHalHidlTargetTestEnvBase {
- public:
- // get the test environment singleton
- static GnssHidlEnvironment* Instance() {
- static GnssHidlEnvironment* instance = new GnssHidlEnvironment;
- return instance;
- }
-
- virtual void registerTestServices() override { registerTestService<IGnss>(); }
-
- private:
- GnssHidlEnvironment() {}
-};
-
// The main test class for GNSS HAL.
-class GnssHalTest : public ::testing::VtsHalHidlTargetTestBase {
- public:
+class GnssHalTest : public testing::TestWithParam<std::string> {
+ public:
virtual void SetUp() override;
virtual void TearDown() override;
diff --git a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
index ca3edc5..c442cc6 100644
--- a/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
+++ b/gnss/2.0/vts/functional/gnss_hal_test_cases.cpp
@@ -16,10 +16,11 @@
#define LOG_TAG "GnssHalTestCases"
-#include <VtsHalHidlTargetTestBase.h>
#include <gnss_hal_test.h>
#include "Utils.h"
+#include <gtest/gtest.h>
+
using android::hardware::hidl_string;
using android::hardware::hidl_vec;
@@ -51,13 +52,13 @@
*
* Empty test fixture to verify basic Setup & Teardown
*/
-TEST_F(GnssHalTest, SetupTeardownCreateCleanup) {}
+TEST_P(GnssHalTest, SetupTeardownCreateCleanup) {}
/*
* TestGnssMeasurementExtension:
* Gets the GnssMeasurementExtension and verifies that it returns an actual extension.
*/
-TEST_F(GnssHalTest, TestGnssMeasurementExtension) {
+TEST_P(GnssHalTest, TestGnssMeasurementExtension) {
auto gnssMeasurement_2_0 = gnss_hal_->getExtensionGnssMeasurement_2_0();
auto gnssMeasurement_1_1 = gnss_hal_->getExtensionGnssMeasurement_1_1();
auto gnssMeasurement_1_0 = gnss_hal_->getExtensionGnssMeasurement();
@@ -80,7 +81,7 @@
* The GNSS HAL 2.0 implementation must support @2.0::IGnssConfiguration interface due to
* the deprecation of some methods in @1.0::IGnssConfiguration interface.
*/
-TEST_F(GnssHalTest, TestGnssConfigurationExtension) {
+TEST_P(GnssHalTest, TestGnssConfigurationExtension) {
auto gnssConfiguration = gnss_hal_->getExtensionGnssConfiguration_2_0();
ASSERT_TRUE(gnssConfiguration.isOk());
sp<IGnssConfiguration_2_0> iGnssConfiguration = gnssConfiguration;
@@ -96,7 +97,7 @@
* TestGnssConfiguration_setSuplEs_Deprecation:
* Calls setSuplEs and verifies that it returns false.
*/
-TEST_F(GnssHalTest, TestGnssConfiguration_setSuplEs_Deprecation) {
+TEST_P(GnssHalTest, TestGnssConfiguration_setSuplEs_Deprecation) {
auto gnssConfiguration = gnss_hal_->getExtensionGnssConfiguration_2_0();
ASSERT_TRUE(gnssConfiguration.isOk());
sp<IGnssConfiguration_2_0> iGnssConfiguration = gnssConfiguration;
@@ -111,7 +112,7 @@
* TestGnssConfiguration_setGpsLock_Deprecation:
* Calls setGpsLock and verifies that it returns false.
*/
-TEST_F(GnssHalTest, TestGnssConfiguration_setGpsLock_Deprecation) {
+TEST_P(GnssHalTest, TestGnssConfiguration_setGpsLock_Deprecation) {
auto gnssConfiguration = gnss_hal_->getExtensionGnssConfiguration_2_0();
ASSERT_TRUE(gnssConfiguration.isOk());
sp<IGnssConfiguration_2_0> iGnssConfiguration = gnssConfiguration;
@@ -130,7 +131,7 @@
* @2.0::IAGnssRil interface due to the deprecation of framework network API methods needed
* to support the @1.0::IAGnssRil interface.
*/
-TEST_F(GnssHalTest, TestAGnssRilExtension) {
+TEST_P(GnssHalTest, TestAGnssRilExtension) {
auto agnssRil_2_0 = gnss_hal_->getExtensionAGnssRil_2_0();
ASSERT_TRUE(agnssRil_2_0.isOk());
sp<IAGnssRil_2_0> iAGnssRil_2_0 = agnssRil_2_0;
@@ -148,7 +149,7 @@
* 1. Updates GNSS HAL that a network has connected.
* 2. Updates GNSS HAL that network has disconnected.
*/
-TEST_F(GnssHalTest, TestAGnssRil_UpdateNetworkState_2_0) {
+TEST_P(GnssHalTest, TestAGnssRil_UpdateNetworkState_2_0) {
auto agnssRil = gnss_hal_->getExtensionAGnssRil_2_0();
ASSERT_TRUE(agnssRil.isOk());
sp<IAGnssRil_2_0> iAGnssRil = agnssRil;
@@ -180,7 +181,7 @@
* 2. constellation is valid.
* 3. state is valid.
*/
-TEST_F(GnssHalTest, TestGnssMeasurementFields) {
+TEST_P(GnssHalTest, TestGnssMeasurementFields) {
const int kFirstGnssMeasurementTimeoutSeconds = 10;
auto gnssMeasurement = gnss_hal_->getExtensionGnssMeasurement_2_0();
@@ -234,7 +235,7 @@
* @2.0::IAGnss interface due to the deprecation of framework network API methods needed
* to support the @1.0::IAGnss interface.
*/
-TEST_F(GnssHalTest, TestAGnssExtension) {
+TEST_P(GnssHalTest, TestAGnssExtension) {
auto agnss_2_0 = gnss_hal_->getExtensionAGnss_2_0();
ASSERT_TRUE(agnss_2_0.isOk());
sp<IAGnss_2_0> iAGnss_2_0 = agnss_2_0;
@@ -258,7 +259,7 @@
* TestGnssNiExtension_Deprecation:
* Gets the @1.0::IGnssNi extension and verifies that it is a nullptr.
*/
-TEST_F(GnssHalTest, TestGnssNiExtension_Deprecation) {
+TEST_P(GnssHalTest, TestGnssNiExtension_Deprecation) {
// Verify IGnssNi 1.0 is not supported.
auto gnssNi = gnss_hal_->getExtensionGnssNi();
ASSERT_TRUE(!gnssNi.isOk() || ((sp<IGnssNi>)gnssNi) == nullptr);
@@ -269,7 +270,7 @@
* Gets the GnssVisibilityControlExtension and if it is not null, verifies that it supports
* the gnss.visibility_control@1.0::IGnssVisibilityControl interface by invoking a method.
*/
-TEST_F(GnssHalTest, TestGnssVisibilityControlExtension) {
+TEST_P(GnssHalTest, TestGnssVisibilityControlExtension) {
auto gnssVisibilityControl = gnss_hal_->getExtensionVisibilityControl();
ASSERT_TRUE(gnssVisibilityControl.isOk());
sp<IGnssVisibilityControl> iGnssVisibilityControl = gnssVisibilityControl;
@@ -290,7 +291,7 @@
* capabilities are reported and the mandatory LOS_SATS or the EXCESS_PATH_LENGTH
* capability flag is set.
*/
-TEST_F(GnssHalTest, TestGnssMeasurementCorrectionsCapabilities) {
+TEST_P(GnssHalTest, TestGnssMeasurementCorrectionsCapabilities) {
if (!(gnss_cb_->last_capabilities_ & IGnssCallback::Capabilities::MEASUREMENT_CORRECTIONS)) {
return;
}
@@ -318,7 +319,7 @@
* If measurement corrections capability is supported, verifies that it supports the
* gnss.measurement_corrections@1.0::IMeasurementCorrections interface by invoking a method.
*/
-TEST_F(GnssHalTest, TestGnssMeasurementCorrections) {
+TEST_P(GnssHalTest, TestGnssMeasurementCorrections) {
if (!(gnss_cb_->last_capabilities_ & IGnssCallback::Capabilities::MEASUREMENT_CORRECTIONS)) {
return;
}
@@ -348,7 +349,7 @@
* Sets a GnssMeasurementCallback, waits for a GnssData object, and verifies the flags in member
 * elapsedRealtime are valid.
*/
-TEST_F(GnssHalTest, TestGnssDataElapsedRealtimeFlags) {
+TEST_P(GnssHalTest, TestGnssDataElapsedRealtimeFlags) {
const int kFirstGnssMeasurementTimeoutSeconds = 10;
auto gnssMeasurement = gnss_hal_->getExtensionGnssMeasurement_2_0();
@@ -383,7 +384,7 @@
iGnssMeasurement->close();
}
-TEST_F(GnssHalTest, TestGnssLocationElapsedRealtime) {
+TEST_P(GnssHalTest, TestGnssLocationElapsedRealtime) {
StartAndCheckFirstLocation();
ASSERT_TRUE((int)gnss_cb_->last_location_.elapsedRealtime.flags <=
@@ -399,7 +400,7 @@
}
// This test only verifies that injectBestLocation_2_0 does not crash.
-TEST_F(GnssHalTest, TestInjectBestLocation_2_0) {
+TEST_P(GnssHalTest, TestInjectBestLocation_2_0) {
StartAndCheckFirstLocation();
gnss_hal_->injectBestLocation_2_0(gnss_cb_->last_location_);
StopAndClearLocations();
@@ -410,7 +411,7 @@
* Gets the @2.0::IGnssBatching extension and verifies that it doesn't return an error. Support
* for this interface is optional.
*/
-TEST_F(GnssHalTest, TestGnssBatchingExtension) {
+TEST_P(GnssHalTest, TestGnssBatchingExtension) {
auto gnssBatching_2_0 = gnss_hal_->getExtensionGnssBatching_2_0();
ASSERT_TRUE(gnssBatching_2_0.isOk());
}
@@ -422,7 +423,7 @@
 * NO_LOCATION_PERIOD_SEC and verify that no location is received. Also perform validity checks on
* each received location.
*/
-TEST_F(GnssHalTest, GetLocationLowPower) {
+TEST_P(GnssHalTest, GetLocationLowPower) {
if (!(gnss_cb_->last_capabilities_ & IGnssCallback::Capabilities::LOW_POWER_MODE)) {
ALOGI("Test GetLocationLowPower skipped. LOW_POWER_MODE capability not supported.");
return;
@@ -513,7 +514,7 @@
* or a source with constellation == UNKNOWN if none are found sufficient times
*/
IGnssConfiguration_1_1::BlacklistedSource FindStrongFrequentNonGpsSource(
- const list<hidl_vec<IGnssCallback_2_0::GnssSvInfo>>& sv_info_lists,
+ const std::list<hidl_vec<IGnssCallback_2_0::GnssSvInfo>>& sv_info_lists,
const int min_observations) {
struct ComparableBlacklistedSource {
IGnssConfiguration_1_1::BlacklistedSource id;
@@ -599,7 +600,7 @@
* 5b) Retry a few times, in case GNSS search strategy takes a while to reacquire even the
* formerly strongest satellite
*/
-TEST_F(GnssHalTest, BlacklistIndividualSatellites) {
+TEST_P(GnssHalTest, BlacklistIndividualSatellites) {
if (!(gnss_cb_->last_capabilities_ & IGnssCallback::Capabilities::SATELLITE_BLACKLIST)) {
ALOGI("Test BlacklistIndividualSatellites skipped. SATELLITE_BLACKLIST capability"
" not supported.");
@@ -626,7 +627,7 @@
*/
const int kGnssSvStatusTimeout = 2;
- list<hidl_vec<IGnssCallback_2_0::GnssSvInfo>> sv_info_lists;
+ std::list<hidl_vec<IGnssCallback_2_0::GnssSvInfo>> sv_info_lists;
int count = gnss_cb_->sv_info_list_cbq_.retrieve(sv_info_lists, sv_info_list_cbq_size,
kGnssSvStatusTimeout);
ASSERT_EQ(count, sv_info_list_cbq_size);
@@ -744,7 +745,7 @@
* GnssStatus does not use any constellation but GPS.
* 4a & b) Clean up by turning off location, and send in empty blacklist.
*/
-TEST_F(GnssHalTest, BlacklistConstellation) {
+TEST_P(GnssHalTest, BlacklistConstellation) {
if (!(gnss_cb_->last_capabilities_ & IGnssCallback::Capabilities::SATELLITE_BLACKLIST)) {
ALOGI("Test BlacklistConstellation skipped. SATELLITE_BLACKLIST capability not supported.");
return;
diff --git a/gnss/common/utils/vts/include/GnssCallbackEventQueue.h b/gnss/common/utils/vts/include/GnssCallbackEventQueue.h
index b1d8ed4..3dc429b 100644
--- a/gnss/common/utils/vts/include/GnssCallbackEventQueue.h
+++ b/gnss/common/utils/vts/include/GnssCallbackEventQueue.h
@@ -53,7 +53,7 @@
* timeout_seconds to retrieve each event. If timeout occurs, it returns the number of
* items retrieved which will be less than count.
*/
- int retrieve(list<T>& event_list, int count, int timeout_seconds);
+ int retrieve(std::list<T>& event_list, int count, int timeout_seconds);
/* Returns the number of events pending to be retrieved from the callback event queue. */
int size() const;
@@ -97,7 +97,7 @@
}
template <class T>
-int GnssCallbackEventQueue<T>::retrieve(list<T>& event_list, int count, int timeout_seconds) {
+int GnssCallbackEventQueue<T>::retrieve(std::list<T>& event_list, int count, int timeout_seconds) {
for (int i = 0; i < count; ++i) {
T event;
if (!retrieve(event, timeout_seconds)) {
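The std::list overload documented above is what the blacklisting tests lean on. A usage fragment, mirroring the call sites elsewhere in this change and assuming the surrounding test fixture (gnss_cb_ and its callback queues) is already set up:

    // Fragment only: assumes the GnssHalTest fixture, with gnss_cb_ already registered.
    const int kGnssSvStatusTimeout = 2;  // seconds allowed per event, as in the tests above
    const int sv_status_cbq_size = gnss_cb_->sv_status_cbq_.size();
    std::list<IGnssCallback::GnssSvStatus> sv_status_list;
    int count = gnss_cb_->sv_status_cbq_.retrieve(sv_status_list, sv_status_cbq_size,
                                                  kGnssSvStatusTimeout);
    // retrieve() waits up to kGnssSvStatusTimeout for each missing event; on timeout it
    // returns fewer events than requested, so count (and sv_status_list.size()) may be
    // smaller than sv_status_cbq_size.
    ASSERT_EQ(count, sv_status_cbq_size);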
diff --git a/graphics/composer/2.1/utils/hwc2on1adapter/Android.bp b/graphics/composer/2.1/utils/hwc2on1adapter/Android.bp
index 062f2e5..0af9745 100644
--- a/graphics/composer/2.1/utils/hwc2on1adapter/Android.bp
+++ b/graphics/composer/2.1/utils/hwc2on1adapter/Android.bp
@@ -55,6 +55,7 @@
"-Wno-shorten-64-to-32",
"-Wno-sign-compare",
"-Wno-missing-prototypes",
+ "-Wno-format-pedantic",
],
srcs: [
diff --git a/graphics/composer/2.2/vts/functional/Android.bp b/graphics/composer/2.2/vts/functional/Android.bp
index 2872880..21ba9f3 100644
--- a/graphics/composer/2.2/vts/functional/Android.bp
+++ b/graphics/composer/2.2/vts/functional/Android.bp
@@ -30,8 +30,6 @@
"libfmq",
"libgui",
"libhidlbase",
- "libhidltransport",
- "libhwbinder",
"libprocessgroup",
"libsync",
"libui",
diff --git a/graphics/composer/2.4/IComposer.hal b/graphics/composer/2.4/IComposer.hal
index 34801da..d3b3cb6 100644
--- a/graphics/composer/2.4/IComposer.hal
+++ b/graphics/composer/2.4/IComposer.hal
@@ -17,12 +17,10 @@
package android.hardware.graphics.composer@2.4;
import IComposerClient;
-
import @2.1::Error;
import @2.3::IComposer;
interface IComposer extends @2.3::IComposer {
-
/**
* Creates a v2.4 client of the composer. Supersedes @2.3::createClient.
*
diff --git a/graphics/composer/2.4/IComposerClient.hal b/graphics/composer/2.4/IComposerClient.hal
index 8fe0976..60445f5 100644
--- a/graphics/composer/2.4/IComposerClient.hal
+++ b/graphics/composer/2.4/IComposerClient.hal
@@ -21,13 +21,12 @@
import @2.3::IComposerClient;
interface IComposerClient extends @2.3::IComposerClient {
-
/**
* Required capabilities which are supported by the display. The
* particular set of supported capabilities for a given display may be
* retrieved using getDisplayCapabilities.
*/
- enum DisplayCapability : uint32_t {
+ enum DisplayCapability : @2.3::IComposerClient.DisplayCapability {
/**
* Indicates that the display supports protected contents.
* When returned, hardware composer must be able to accept client target
@@ -37,6 +36,20 @@
};
/**
+ * Supersedes {@link @2.1::IComposerClient.DisplayType}.
+ */
+ enum DisplayConnectionType : uint32_t {
+ /**
+ * Display is connected through internal port, e.g. DSI, eDP.
+ */
+ INTERNAL = 0,
+ /**
+ * Display is connected through external port, e.g. HDMI, DisplayPort.
+ */
+ EXTERNAL = 1,
+ };
+
+ /**
* Provides a list of supported capabilities (as described in the
* definition of DisplayCapability above). This list must not change after
* initialization.
@@ -46,6 +59,14 @@
* @return capabilities is a list of supported capabilities.
*/
getDisplayCapabilities_2_4(Display display)
- generates (Error error,
- vec<DisplayCapability> capabilities);
+ generates (Error error, vec<DisplayCapability> capabilities);
+
+ /**
+ * Returns whether the given physical display is internal or external.
+ *
+ * @return error is NONE upon success. Otherwise,
+ * BAD_DISPLAY when the given display is invalid or virtual.
+ * @return type is the connection type of the display.
+ */
+ getDisplayConnectionType(Display display) generates (Error error, DisplayConnectionType type);
};
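On the client side the new method has the usual HIDL callback-return shape. A minimal C++ sketch of calling it, essentially the same wrapper the VTS helper adds further down in this change (Display and Error come from @2.1, which @2.4 imports; treat this as a sketch, not the canonical client code):

    #include <android/hardware/graphics/composer/2.4/IComposerClient.h>

    using ::android::sp;
    using ::android::hardware::graphics::composer::V2_1::Display;
    using ::android::hardware::graphics::composer::V2_1::Error;  // @2.4 imports @2.1::Error
    using ::android::hardware::graphics::composer::V2_4::IComposerClient;

    Error getConnectionType(const sp<IComposerClient>& client, Display display,
                            IComposerClient::DisplayConnectionType* outType) {
        Error error = Error::NONE;
        // Synchronous HIDL call; the generated proxy invokes the lambda with the results.
        client->getDisplayConnectionType(display, [&](const auto& tmpError, const auto& tmpType) {
            error = tmpError;
            *outType = tmpType;
        });
        return error;
    }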
diff --git a/graphics/composer/2.4/default/Android.bp b/graphics/composer/2.4/default/Android.bp
index a44e687..a30609b 100644
--- a/graphics/composer/2.4/default/Android.bp
+++ b/graphics/composer/2.4/default/Android.bp
@@ -37,7 +37,6 @@
"libfmq",
"libhardware",
"libhidlbase",
- "libhidltransport",
"libhwc2on1adapter",
"libhwc2onfbadapter",
"liblog",
diff --git a/graphics/composer/2.4/utils/hal/include/composer-hal/2.4/ComposerClient.h b/graphics/composer/2.4/utils/hal/include/composer-hal/2.4/ComposerClient.h
index 7110c80..c810186 100644
--- a/graphics/composer/2.4/utils/hal/include/composer-hal/2.4/ComposerClient.h
+++ b/graphics/composer/2.4/utils/hal/include/composer-hal/2.4/ComposerClient.h
@@ -46,6 +46,14 @@
return Void();
}
+ Return<void> getDisplayConnectionType(
+ Display display, IComposerClient::getDisplayConnectionType_cb hidl_cb) override {
+ IComposerClient::DisplayConnectionType type;
+ Error error = mHal->getDisplayConnectionType(display, &type);
+ hidl_cb(error, type);
+ return Void();
+ }
+
static std::unique_ptr<ComposerClientImpl> create(Hal* hal) {
auto client = std::make_unique<ComposerClientImpl>(hal);
return client->init() ? std::move(client) : nullptr;
diff --git a/graphics/composer/2.4/utils/hal/include/composer-hal/2.4/ComposerHal.h b/graphics/composer/2.4/utils/hal/include/composer-hal/2.4/ComposerHal.h
index 0074808..c3bb535 100644
--- a/graphics/composer/2.4/utils/hal/include/composer-hal/2.4/ComposerHal.h
+++ b/graphics/composer/2.4/utils/hal/include/composer-hal/2.4/ComposerHal.h
@@ -38,6 +38,8 @@
public:
virtual Error getDisplayCapabilities_2_4(
Display display, std::vector<IComposerClient::DisplayCapability>* outCapabilities) = 0;
+ virtual Error getDisplayConnectionType(Display display,
+ IComposerClient::DisplayConnectionType* outType) = 0;
};
} // namespace hal
diff --git a/graphics/composer/2.4/utils/passthrough/include/composer-passthrough/2.4/HwcHal.h b/graphics/composer/2.4/utils/passthrough/include/composer-passthrough/2.4/HwcHal.h
index 65d47d7..fd05f66 100644
--- a/graphics/composer/2.4/utils/passthrough/include/composer-passthrough/2.4/HwcHal.h
+++ b/graphics/composer/2.4/utils/passthrough/include/composer-passthrough/2.4/HwcHal.h
@@ -62,15 +62,34 @@
return Error::NONE;
}
+ Error getDisplayConnectionType(Display display,
+ IComposerClient::DisplayConnectionType* outType) override {
+ if (!mDispatch.getDisplayConnectionType) {
+ return Error::UNSUPPORTED;
+ }
+
+ uint32_t type = HWC2_DISPLAY_CONNECTION_TYPE_INTERNAL;
+ int32_t error = mDispatch.getDisplayConnectionType(mDevice, display, &type);
+ *outType = static_cast<IComposerClient::DisplayConnectionType>(type);
+ return static_cast<Error>(error);
+ }
+
protected:
bool initDispatch() override {
if (!BaseType2_3::initDispatch()) {
return false;
}
+
+ this->initOptionalDispatch(HWC2_FUNCTION_GET_DISPLAY_CONNECTION_TYPE,
+ &mDispatch.getDisplayConnectionType);
return true;
}
private:
+ struct {
+ HWC2_PFN_GET_DISPLAY_CONNECTION_TYPE getDisplayConnectionType;
+ } mDispatch = {};
+
using BaseType2_1 = V2_1::passthrough::detail::HwcHalImpl<Hal>;
using BaseType2_3 = V2_3::passthrough::detail::HwcHalImpl<Hal>;
using BaseType2_1::mDevice;
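The passthrough change above uses the optional-dispatch pattern: resolve the hwcomposer2 function pointer at init time, keep it in a small mDispatch struct, and answer UNSUPPORTED when the device does not provide it. A stripped-down, self-contained sketch of just that pattern, with hypothetical names not tied to real HWC2 symbols:

    #include <cstdint>

    // Signature shape only; real HWC2 hooks are C function pointers looked up by enum.
    using OptionalFn = int32_t (*)(uint64_t display, uint32_t* outValue);

    enum class SketchError : int32_t { NONE = 0, UNSUPPORTED };

    class HalSketch {
      public:
        SketchError getOptionalValue(uint64_t display, uint32_t* outValue) {
            if (!mDispatch.optionalFn) {
                // Device predates this hook: report UNSUPPORTED instead of crashing.
                return SketchError::UNSUPPORTED;
            }
            return static_cast<SketchError>(mDispatch.optionalFn(display, outValue));
        }

        // Called during dispatch init; the pointer may legitimately stay nullptr.
        void initOptionalDispatch(OptionalFn fn) { mDispatch.optionalFn = fn; }

      private:
        struct {
            OptionalFn optionalFn;
        } mDispatch = {};  // zero-initialized, so a missing hook reads as nullptr
    };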
diff --git a/graphics/composer/2.4/utils/vts/ComposerVts.cpp b/graphics/composer/2.4/utils/vts/ComposerVts.cpp
index ee4f3a3..937b50e 100644
--- a/graphics/composer/2.4/utils/vts/ComposerVts.cpp
+++ b/graphics/composer/2.4/utils/vts/ComposerVts.cpp
@@ -51,7 +51,6 @@
Error ComposerClient::getDisplayCapabilities(
Display display, std::vector<IComposerClient::DisplayCapability>* outCapabilities) {
- std::vector<IComposerClient::DisplayCapability> capabilities;
Error error = Error::NONE;
mClient->getDisplayCapabilities_2_4(display,
[&](const auto& tmpError, const auto& tmpCapabilities) {
@@ -61,6 +60,16 @@
return error;
}
+Error ComposerClient::getDisplayConnectionType(Display display,
+ IComposerClient::DisplayConnectionType* outType) {
+ Error error = Error::NONE;
+ mClient->getDisplayConnectionType(display, [&](const auto& tmpError, const auto& tmpType) {
+ error = tmpError;
+ *outType = tmpType;
+ });
+ return error;
+}
+
} // namespace vts
} // namespace V2_4
} // namespace composer
diff --git a/graphics/composer/2.4/utils/vts/include/composer-vts/2.4/ComposerVts.h b/graphics/composer/2.4/utils/vts/include/composer-vts/2.4/ComposerVts.h
index 0a301c6..a7d7f86 100644
--- a/graphics/composer/2.4/utils/vts/include/composer-vts/2.4/ComposerVts.h
+++ b/graphics/composer/2.4/utils/vts/include/composer-vts/2.4/ComposerVts.h
@@ -71,6 +71,9 @@
Display display,
std::vector<IComposerClient::DisplayCapability>* outDisplayCapabilities);
+ Error getDisplayConnectionType(Display display,
+ IComposerClient::DisplayConnectionType* outType);
+
private:
const sp<IComposerClient> mClient;
};
diff --git a/graphics/composer/2.4/vts/functional/Android.bp b/graphics/composer/2.4/vts/functional/Android.bp
index 6ee7873..921c421 100644
--- a/graphics/composer/2.4/vts/functional/Android.bp
+++ b/graphics/composer/2.4/vts/functional/Android.bp
@@ -22,7 +22,6 @@
// TODO(b/64437680): Assume these libs are always available on the device.
shared_libs: [
"libfmq",
- "libhidltransport",
"libsync",
],
static_libs: [
diff --git a/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp b/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp
index 0fccc58..76c0039 100644
--- a/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp
+++ b/graphics/composer/2.4/vts/functional/VtsHalGraphicsComposerV2_4TargetTest.cpp
@@ -179,6 +179,16 @@
EXPECT_EQ(Error::BAD_DISPLAY, error);
}
+TEST_F(GraphicsComposerHidlTest, getDisplayConnectionType) {
+ IComposerClient::DisplayConnectionType type;
+ EXPECT_EQ(Error::BAD_DISPLAY,
+ mComposerClient->getDisplayConnectionType(mInvalidDisplayId, &type));
+
+ for (Display display : mComposerCallback->getDisplays()) {
+ EXPECT_EQ(Error::NONE, mComposerClient->getDisplayConnectionType(display, &type));
+ }
+}
+
} // namespace
} // namespace vts
} // namespace V2_4
diff --git a/graphics/mapper/2.0/utils/hal/include/mapper-hal/2.0/Mapper.h b/graphics/mapper/2.0/utils/hal/include/mapper-hal/2.0/Mapper.h
index 5ad2a65..8134174 100644
--- a/graphics/mapper/2.0/utils/hal/include/mapper-hal/2.0/Mapper.h
+++ b/graphics/mapper/2.0/utils/hal/include/mapper-hal/2.0/Mapper.h
@@ -80,17 +80,21 @@
}
Return<Error> freeBuffer(void* buffer) override {
- native_handle_t* bufferHandle = removeImportedBuffer(buffer);
+ native_handle_t* bufferHandle = getImportedBuffer(buffer);
if (!bufferHandle) {
return Error::BAD_BUFFER;
}
- return mHal->freeBuffer(bufferHandle);
+ Error error = mHal->freeBuffer(bufferHandle);
+ if (error == Error::NONE) {
+ removeImportedBuffer(buffer);
+ }
+ return error;
}
Return<void> lock(void* buffer, uint64_t cpuUsage, const V2_0::IMapper::Rect& accessRegion,
const hidl_handle& acquireFence, IMapper::lock_cb hidl_cb) override {
- const native_handle_t* bufferHandle = getImportedBuffer(buffer);
+ const native_handle_t* bufferHandle = getConstImportedBuffer(buffer);
if (!bufferHandle) {
hidl_cb(Error::BAD_BUFFER, nullptr);
return Void();
@@ -112,7 +116,7 @@
Return<void> lockYCbCr(void* buffer, uint64_t cpuUsage, const V2_0::IMapper::Rect& accessRegion,
const hidl_handle& acquireFence,
IMapper::lockYCbCr_cb hidl_cb) override {
- const native_handle_t* bufferHandle = getImportedBuffer(buffer);
+ const native_handle_t* bufferHandle = getConstImportedBuffer(buffer);
if (!bufferHandle) {
hidl_cb(Error::BAD_BUFFER, YCbCrLayout{});
return Void();
@@ -132,7 +136,7 @@
}
Return<void> unlock(void* buffer, IMapper::unlock_cb hidl_cb) override {
- const native_handle_t* bufferHandle = getImportedBuffer(buffer);
+ const native_handle_t* bufferHandle = getConstImportedBuffer(buffer);
if (!bufferHandle) {
hidl_cb(Error::BAD_BUFFER, nullptr);
return Void();
@@ -160,7 +164,11 @@
return static_cast<native_handle_t*>(buffer);
}
- virtual const native_handle_t* getImportedBuffer(void* buffer) const {
+ virtual native_handle_t* getImportedBuffer(void* buffer) const {
+ return static_cast<native_handle_t*>(buffer);
+ }
+
+ virtual const native_handle_t* getConstImportedBuffer(void* buffer) const {
return static_cast<const native_handle_t*>(buffer);
}
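The freeBuffer change is an error-handling fix: the imported handle is now removed from the bookkeeping pool only after the HAL reports success, so a failed free no longer drops a still-live buffer from tracking. A generic, self-contained sketch of that erase-only-on-success pattern (Pool and HalStub are stand-ins, not the real gralloc classes):

    #include <set>

    enum class Err { NONE, BAD_BUFFER };

    struct HalStub {
        // Trivial stand-in for the mapper HAL's freeBuffer(); a real HAL may fail here.
        Err freeBuffer(void* /* handle */) { return Err::NONE; }
    };

    class ImportedBufferPool {
      public:
        Err freeBuffer(HalStub& hal, void* buffer) {
            if (mImported.count(buffer) == 0) {
                return Err::BAD_BUFFER;   // not an imported buffer (or already freed)
            }
            Err error = hal.freeBuffer(buffer);
            if (error == Err::NONE) {
                mImported.erase(buffer);  // forget the handle only once it is really gone
            }
            return error;                 // on failure the buffer stays tracked
        }

      private:
        std::set<void*> mImported;
    };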
diff --git a/graphics/mapper/2.0/utils/passthrough/include/mapper-passthrough/2.0/Gralloc1Hal.h b/graphics/mapper/2.0/utils/passthrough/include/mapper-passthrough/2.0/Gralloc1Hal.h
index d9beb4f..db7e67d 100644
--- a/graphics/mapper/2.0/utils/passthrough/include/mapper-passthrough/2.0/Gralloc1Hal.h
+++ b/graphics/mapper/2.0/utils/passthrough/include/mapper-passthrough/2.0/Gralloc1Hal.h
@@ -282,14 +282,16 @@
}
if (flex.planes[0].component != FLEX_COMPONENT_Y ||
- flex.planes[1].component != FLEX_COMPONENT_Cb ||
- flex.planes[2].component != FLEX_COMPONENT_Cr) {
+ ((flex.planes[1].component != FLEX_COMPONENT_Cb || flex.planes[2].component != FLEX_COMPONENT_Cr) &&
+ (flex.planes[2].component != FLEX_COMPONENT_Cb || flex.planes[1].component != FLEX_COMPONENT_Cr))) {
return false;
}
const auto& y = flex.planes[0];
- const auto& cb = flex.planes[1];
- const auto& cr = flex.planes[2];
+ const auto& cb = (flex.planes[1].component == FLEX_COMPONENT_Cb)?
+ flex.planes[1] : flex.planes[2];
+ const auto& cr = (flex.planes[2].component == FLEX_COMPONENT_Cr)?
+ flex.planes[2] : flex.planes[1];
if (cb.h_increment != cr.h_increment || cb.v_increment != cr.v_increment) {
return false;
diff --git a/graphics/mapper/2.0/utils/passthrough/include/mapper-passthrough/2.0/GrallocLoader.h b/graphics/mapper/2.0/utils/passthrough/include/mapper-passthrough/2.0/GrallocLoader.h
index e8b1b4b..85a91c3 100644
--- a/graphics/mapper/2.0/utils/passthrough/include/mapper-passthrough/2.0/GrallocLoader.h
+++ b/graphics/mapper/2.0/utils/passthrough/include/mapper-passthrough/2.0/GrallocLoader.h
@@ -68,7 +68,14 @@
return mBufferHandles.erase(bufferHandle) == 1 ? bufferHandle : nullptr;
}
- const native_handle_t* get(void* buffer) {
+ native_handle_t* get(void* buffer) {
+ auto bufferHandle = static_cast<native_handle_t*>(buffer);
+
+ std::lock_guard<std::mutex> lock(mMutex);
+ return mBufferHandles.count(bufferHandle) == 1 ? bufferHandle : nullptr;
+ }
+
+ const native_handle_t* getConst(void* buffer) {
auto bufferHandle = static_cast<const native_handle_t*>(buffer);
std::lock_guard<std::mutex> lock(mMutex);
@@ -92,9 +99,13 @@
return GrallocImportedBufferPool::getInstance().remove(buffer);
}
- const native_handle_t* getImportedBuffer(void* buffer) const override {
+ native_handle_t* getImportedBuffer(void* buffer) const override {
return GrallocImportedBufferPool::getInstance().get(buffer);
}
+
+ const native_handle_t* getConstImportedBuffer(void* buffer) const override {
+ return GrallocImportedBufferPool::getInstance().getConst(buffer);
+ }
};
class GrallocLoader {
diff --git a/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h b/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h
index 18fbb6d..8540068 100644
--- a/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h
+++ b/graphics/mapper/2.1/utils/passthrough/include/mapper-passthrough/2.1/Gralloc0Hal.h
@@ -37,6 +37,10 @@
Error validateBufferSize(const native_handle_t* bufferHandle,
const IMapper::BufferDescriptorInfo& descriptorInfo,
uint32_t stride) override {
+ if (descriptorInfo.layerCount != 1) {
+ return Error::BAD_VALUE;
+ }
+
if (!mModule->validateBufferSize) {
return Error::NONE;
}
diff --git a/graphics/mapper/4.0/IMapper.hal b/graphics/mapper/4.0/IMapper.hal
index f5df04b..85c7c81 100644
--- a/graphics/mapper/4.0/IMapper.hal
+++ b/graphics/mapper/4.0/IMapper.hal
@@ -202,6 +202,9 @@
* the buffer. If the bytesPerStride is unknown or variable, a value of -1
* should be returned.
*
+ * The locked buffer must adhere to the format requested at allocation time
+ * in the BufferDescriptorInfo.
+ *
* @param buffer Buffer to lock.
* @param cpuUsage CPU usage flags to request. See +ndk
* libnativewindow#AHardwareBuffer_UsageFlags for possible values.
diff --git a/health/2.0/vts/functional/VtsHalHealthV2_0TargetTest.cpp b/health/2.0/vts/functional/VtsHalHealthV2_0TargetTest.cpp
index 74fe4fb..6e13a98 100644
--- a/health/2.0/vts/functional/VtsHalHealthV2_0TargetTest.cpp
+++ b/health/2.0/vts/functional/VtsHalHealthV2_0TargetTest.cpp
@@ -257,15 +257,21 @@
using V1_0::BatteryStatus;
using V1_0::BatteryHealth;
- if (!((health_info.legacy.batteryChargeCounter > 0) &&
- (health_info.legacy.batteryCurrent != INT32_MIN) &&
+ if (!((health_info.legacy.batteryCurrent != INT32_MIN) &&
(0 <= health_info.legacy.batteryLevel && health_info.legacy.batteryLevel <= 100) &&
verifyEnum<BatteryHealth>(health_info.legacy.batteryHealth) &&
- (health_info.legacy.batteryStatus != BatteryStatus::UNKNOWN) &&
verifyEnum<BatteryStatus>(health_info.legacy.batteryStatus))) {
return false;
}
+ if (health_info.legacy.batteryPresent) {
+ // If a battery is present, the battery status must be known.
+ if (!((health_info.legacy.batteryChargeCounter > 0) &&
+ (health_info.legacy.batteryStatus != BatteryStatus::UNKNOWN))) {
+ return false;
+ }
+ }
+
return true;
}
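The health VTS change splits the sanity checks: values that must always be sane stay unconditional, while the charge-counter and battery-status checks now apply only when a battery is reported present. A condensed sketch of the resulting predicate, with the HIDL structs collapsed to the fields used here and stand-in enum values:

    #include <cstdint>

    enum class BatteryStatus { UNKNOWN, CHARGING, DISCHARGING, NOT_CHARGING, FULL };

    struct BatteryFields {  // collapsed from HealthInfo::legacy
        bool batteryPresent;
        int32_t batteryChargeCounter;
        int32_t batteryCurrent;
        int32_t batteryLevel;
        BatteryStatus batteryStatus;
    };

    bool verifyBatteryFields(const BatteryFields& b) {
        // Always required, battery or not.
        if (b.batteryCurrent == INT32_MIN) return false;
        if (b.batteryLevel < 0 || b.batteryLevel > 100) return false;

        // Only required when a battery is actually present.
        if (b.batteryPresent) {
            if (b.batteryChargeCounter <= 0) return false;
            if (b.batteryStatus == BatteryStatus::UNKNOWN) return false;
        }
        return true;
    }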
diff --git a/keymaster/3.0/vts/functional/Android.bp b/keymaster/3.0/vts/functional/Android.bp
index b0371c7..69aa56d 100644
--- a/keymaster/3.0/vts/functional/Android.bp
+++ b/keymaster/3.0/vts/functional/Android.bp
@@ -26,7 +26,7 @@
],
static_libs: [
"android.hardware.keymaster@3.0",
- "libcrypto",
+ "libcrypto_static",
"libsoftkeymasterdevice",
],
test_suites: ["general-tests"],
diff --git a/keymaster/4.0/support/include/keymasterV4_0/keymaster_tags.h b/keymaster/4.0/support/include/keymasterV4_0/keymaster_tags.h
index 97dab68..cb29c64 100644
--- a/keymaster/4.0/support/include/keymasterV4_0/keymaster_tags.h
+++ b/keymaster/4.0/support/include/keymasterV4_0/keymaster_tags.h
@@ -280,39 +280,50 @@
*/
template <typename ValueT>
class NullOr {
- template <typename T>
- struct reference_initializer {
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wnull-dereference"
- static T&& init() { return *static_cast<std::remove_reference_t<T>*>(nullptr); }
-#pragma GCC diagnostic pop
- };
- template <typename T>
- struct pointer_initializer {
- static T init() { return nullptr; }
- };
- template <typename T>
- struct value_initializer {
- static T init() { return T(); }
- };
- template <typename T>
- using initializer_t =
- std::conditional_t<std::is_lvalue_reference<T>::value, reference_initializer<T>,
- std::conditional_t<std::is_pointer<T>::value, pointer_initializer<T>,
- value_initializer<T>>>;
+ using internal_t = std::conditional_t<std::is_lvalue_reference<ValueT>::value,
+ std::remove_reference_t<ValueT>*, ValueT>;
- public:
- NullOr() : value_(initializer_t<ValueT>::init()), null_(true) {}
- NullOr(ValueT&& value) : value_(std::forward<ValueT>(value)), null_(false) {}
+ struct pointer_initializer {
+ static std::nullptr_t init() { return nullptr; }
+ };
+ struct value_initializer {
+ static ValueT init() { return ValueT(); }
+ };
+ struct value_pointer_deref_t {
+ static ValueT& deref(ValueT& v) { return v; }
+ };
+ struct reference_deref_t {
+ static auto& deref(internal_t v) { return *v; }
+ };
+ using initializer_t = std::conditional_t<std::is_lvalue_reference<ValueT>::value ||
+ std::is_pointer<ValueT>::value,
+ pointer_initializer, value_initializer>;
+ using deref_t = std::conditional_t<std::is_lvalue_reference<ValueT>::value, reference_deref_t,
+ value_pointer_deref_t>;
+
+ public:
+ NullOr() : value_(initializer_t::init()), null_(true) {}
+ template <typename T>
+ NullOr(T&& value, typename std::enable_if<
+ !std::is_lvalue_reference<ValueT>::value &&
+ std::is_same<std::decay_t<ValueT>, std::decay_t<T>>::value,
+ int>::type = 0)
+ : value_(std::forward<ValueT>(value)), null_(false) {}
+ template <typename T>
+ NullOr(T& value, typename std::enable_if<
+ std::is_lvalue_reference<ValueT>::value &&
+ std::is_same<std::decay_t<ValueT>, std::decay_t<T>>::value,
+ int>::type = 0)
+ : value_(&value), null_(false) {}
bool isOk() const { return !null_; }
- const ValueT& value() const & { return value_; }
- ValueT& value() & { return value_; }
- ValueT&& value() && { return std::move(value_); }
+ const ValueT& value() const& { return deref_t::deref(value_); }
+ ValueT& value() & { return deref_t::deref(value_); }
+ ValueT&& value() && { return std::move(deref_t::deref(value_)); }
- private:
- ValueT value_;
+ private:
+ internal_t value_;
bool null_;
};
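The NullOr rewrite stores lvalue-reference payloads as pointers internally instead of materializing a reference from a null pointer, which is why the -Wnull-dereference pragma disappears; callers see the same interface. A small usage sketch against the class as rewritten above, assuming the usual ::android::hardware::keymaster::V4_0 namespace of keymaster_tags.h:

    #include <keymasterV4_0/keymaster_tags.h>
    #include <string>

    using ::android::hardware::keymaster::V4_0::NullOr;

    NullOr<std::string> findName(bool found) {
        if (!found) return {};            // default-constructed => isOk() == false
        return std::string("keymaster");  // value-constructed => isOk() == true
    }

    NullOr<const std::string&> maybeRef(const std::string& s, bool found) {
        if (!found) return {};  // reference payload is now held as a pointer internally
        return s;               // no null-reference trick needed anymore
    }

    bool demo() {
        auto v = findName(true);
        return v.isOk() && v.value() == "keymaster";
    }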
diff --git a/keymaster/4.0/vts/functional/Android.bp b/keymaster/4.0/vts/functional/Android.bp
index 333e408..0401362 100644
--- a/keymaster/4.0/vts/functional/Android.bp
+++ b/keymaster/4.0/vts/functional/Android.bp
@@ -25,7 +25,7 @@
],
static_libs: [
"android.hardware.keymaster@4.0",
- "libcrypto",
+ "libcrypto_static",
"libkeymaster4support",
"libsoftkeymasterdevice",
],
diff --git a/keymaster/4.0/vts/functional/KeymasterHidlTest.cpp b/keymaster/4.0/vts/functional/KeymasterHidlTest.cpp
index 3af1df3..4838e7e 100644
--- a/keymaster/4.0/vts/functional/KeymasterHidlTest.cpp
+++ b/keymaster/4.0/vts/functional/KeymasterHidlTest.cpp
@@ -48,10 +48,11 @@
SecurityLevel KeymasterHidlTest::securityLevel_;
hidl_string KeymasterHidlTest::name_;
hidl_string KeymasterHidlTest::author_;
+string KeymasterHidlTest::service_name_;
-void KeymasterHidlTest::SetUpTestCase() {
- string service_name = KeymasterHidlEnvironment::Instance()->getServiceName<IKeymasterDevice>();
- keymaster_ = ::testing::VtsHalHidlTargetTestBase::getService<IKeymasterDevice>(service_name);
+void KeymasterHidlTest::InitializeKeymaster() {
+ service_name_ = KeymasterHidlEnvironment::Instance()->getServiceName<IKeymasterDevice>();
+ keymaster_ = ::testing::VtsHalHidlTargetTestBase::getService<IKeymasterDevice>(service_name_);
ASSERT_NE(keymaster_, nullptr);
ASSERT_TRUE(keymaster_
@@ -62,18 +63,22 @@
author_ = author;
})
.isOk());
+}
+
+void KeymasterHidlTest::SetUpTestCase() {
+
+ InitializeKeymaster();
os_version_ = ::keymaster::GetOsVersion();
os_patch_level_ = ::keymaster::GetOsPatchlevel();
auto service_manager = android::hidl::manager::V1_0::IServiceManager::getService();
ASSERT_NE(nullptr, service_manager.get());
-
all_keymasters_.push_back(keymaster_);
service_manager->listByInterface(
IKeymasterDevice::descriptor, [&](const hidl_vec<hidl_string>& names) {
for (auto& name : names) {
- if (name == service_name) continue;
+ if (name == service_name_) continue;
auto keymaster =
::testing::VtsHalHidlTargetTestBase::getService<IKeymasterDevice>(name);
ASSERT_NE(keymaster, nullptr);
@@ -269,6 +274,13 @@
return GetCharacteristics(key_blob, client_id, app_data, key_characteristics);
}
+ErrorCode KeymasterHidlTest::GetDebugInfo(DebugInfo* debug_info) {
+ EXPECT_TRUE(keymaster_->getDebugInfo([&](const DebugInfo& hidl_debug_info) {
+ *debug_info = hidl_debug_info;
+ }).isOk());
+ return ErrorCode::OK;
+}
+
ErrorCode KeymasterHidlTest::Begin(KeyPurpose purpose, const HidlBuf& key_blob,
const AuthorizationSet& in_params, AuthorizationSet* out_params,
OperationHandle* op_handle) {
@@ -611,6 +623,20 @@
return ciphertext;
}
+string KeymasterHidlTest::EncryptMessage(const string& message, BlockMode block_mode,
+ PaddingMode padding, uint8_t mac_length_bits,
+ const HidlBuf& iv_in) {
+ SCOPED_TRACE("EncryptMessage");
+ auto params = AuthorizationSetBuilder()
+ .BlockMode(block_mode)
+ .Padding(padding)
+ .Authorization(TAG_MAC_LENGTH, mac_length_bits)
+ .Authorization(TAG_NONCE, iv_in);
+ AuthorizationSet out_params;
+ string ciphertext = EncryptMessage(message, params, &out_params);
+ return ciphertext;
+}
+
string KeymasterHidlTest::DecryptMessage(const HidlBuf& key_blob, const string& ciphertext,
const AuthorizationSet& params) {
SCOPED_TRACE("DecryptMessage");
diff --git a/keymaster/4.0/vts/functional/KeymasterHidlTest.h b/keymaster/4.0/vts/functional/KeymasterHidlTest.h
index 015fc43..b09da45 100644
--- a/keymaster/4.0/vts/functional/KeymasterHidlTest.h
+++ b/keymaster/4.0/vts/functional/KeymasterHidlTest.h
@@ -37,6 +37,7 @@
using ::android::sp;
using ::std::string;
+using hidl::base::V1_0::DebugInfo;
class HidlBuf : public hidl_vec<uint8_t> {
typedef hidl_vec<uint8_t> super;
@@ -95,6 +96,7 @@
// SetUpTestCase runs only once per test case, not once per test.
static void SetUpTestCase();
+ static void InitializeKeymaster();
static void TearDownTestCase() {
keymaster_.clear();
all_keymasters_.clear();
@@ -140,6 +142,8 @@
const HidlBuf& app_data, KeyCharacteristics* key_characteristics);
ErrorCode GetCharacteristics(const HidlBuf& key_blob, KeyCharacteristics* key_characteristics);
+ ErrorCode GetDebugInfo(DebugInfo* debug_info);
+
ErrorCode Begin(KeyPurpose purpose, const HidlBuf& key_blob, const AuthorizationSet& in_params,
AuthorizationSet* out_params, OperationHandle* op_handle);
ErrorCode Begin(KeyPurpose purpose, const AuthorizationSet& in_params,
@@ -201,6 +205,8 @@
HidlBuf* iv_out);
string EncryptMessage(const string& message, BlockMode block_mode, PaddingMode padding,
const HidlBuf& iv_in);
+ string EncryptMessage(const string& message, BlockMode block_mode, PaddingMode padding,
+ uint8_t mac_length_bits, const HidlBuf& iv_in);
string DecryptMessage(const HidlBuf& key_blob, const string& ciphertext,
const AuthorizationSet& params);
@@ -235,6 +241,7 @@
static SecurityLevel securityLevel_;
static hidl_string name_;
static hidl_string author_;
+ static string service_name_;
};
} // namespace test
diff --git a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
index 9e6cce7..0ac7e48 100644
--- a/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
+++ b/keymaster/4.0/vts/functional/keymaster_hidl_hal_test.cpp
@@ -18,6 +18,7 @@
#include <cutils/log.h>
#include <iostream>
+#include <signal.h>
#include <openssl/evp.h>
#include <openssl/mem.h>
@@ -2706,6 +2707,40 @@
}
/*
+ * EncryptionOperationsTest.AesWrongPurpose
+ *
+ * Verifies that AES encryption fails in the correct way when an unauthorized purpose is specified.
+ */
+TEST_F(EncryptionOperationsTest, AesWrongPurpose) {
+ auto err = GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesKey(128)
+ .Authorization(TAG_PURPOSE, KeyPurpose::ENCRYPT)
+ .Authorization(TAG_BLOCK_MODE, BlockMode::GCM)
+ .Authorization(TAG_MIN_MAC_LENGTH, 128)
+ .Padding(PaddingMode::NONE));
+ ASSERT_EQ(ErrorCode::OK, err) << "Got " << err;
+
+ err = Begin(KeyPurpose::DECRYPT,
+ AuthorizationSetBuilder().BlockMode(BlockMode::GCM).Padding(PaddingMode::NONE));
+ EXPECT_EQ(ErrorCode::INCOMPATIBLE_PURPOSE, err) << "Got " << err;
+
+ CheckedDeleteKey();
+
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesKey(128)
+ .Authorization(TAG_PURPOSE, KeyPurpose::DECRYPT)
+ .Authorization(TAG_BLOCK_MODE, BlockMode::GCM)
+ .Authorization(TAG_MIN_MAC_LENGTH, 128)
+ .Padding(PaddingMode::NONE)));
+
+ err = Begin(KeyPurpose::ENCRYPT,
+ AuthorizationSetBuilder().BlockMode(BlockMode::GCM).Padding(PaddingMode::NONE));
+ EXPECT_EQ(ErrorCode::INCOMPATIBLE_PURPOSE, err) << "Got " << err;
+}
+
+/*
* EncryptionOperationsTest.AesEcbNoPaddingWrongInputSize
*
* Verifies that AES encryption fails in the correct way when provided an input that is not a
@@ -3225,6 +3260,92 @@
}
/*
+ * EncryptionOperationsTest.AesGcmRoundTripWithDelaySuccess
+ *
+ * Verifies that AES GCM mode works, even when there's a long delay
+ * between operations.
+ */
+TEST_F(EncryptionOperationsTest, AesGcmRoundTripWithDelaySuccess) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(128)
+ .Authorization(TAG_BLOCK_MODE, BlockMode::GCM)
+ .Padding(PaddingMode::NONE)
+ .Authorization(TAG_MIN_MAC_LENGTH, 128)));
+
+ string aad = "foobar";
+ string message = "123456789012345678901234567890123456";
+
+ auto begin_params = AuthorizationSetBuilder()
+ .BlockMode(BlockMode::GCM)
+ .Padding(PaddingMode::NONE)
+ .Authorization(TAG_MAC_LENGTH, 128);
+
+ auto update_params =
+ AuthorizationSetBuilder().Authorization(TAG_ASSOCIATED_DATA, aad.data(), aad.size());
+
+ // Encrypt
+ AuthorizationSet begin_out_params;
+ ASSERT_EQ(ErrorCode::OK, Begin(KeyPurpose::ENCRYPT, begin_params, &begin_out_params))
+ << "Begin encrypt";
+ string ciphertext;
+ AuthorizationSet update_out_params;
+ sleep(5);
+ ASSERT_EQ(ErrorCode::OK,
+ Finish(op_handle_, update_params, message, "", &update_out_params, &ciphertext));
+
+ ASSERT_EQ(ciphertext.length(), message.length() + 16);
+
+ // Grab nonce
+ begin_params.push_back(begin_out_params);
+
+ // Decrypt.
+ ASSERT_EQ(ErrorCode::OK, Begin(KeyPurpose::DECRYPT, begin_params)) << "Begin decrypt";
+ string plaintext;
+ size_t input_consumed;
+ sleep(5);
+ ASSERT_EQ(ErrorCode::OK, Update(op_handle_, update_params, ciphertext, &update_out_params,
+ &plaintext, &input_consumed));
+ EXPECT_EQ(ciphertext.size(), input_consumed);
+ sleep(5);
+ EXPECT_EQ(ErrorCode::OK, Finish("", &plaintext));
+ EXPECT_EQ(message.length(), plaintext.length());
+ EXPECT_EQ(message, plaintext);
+}
+
+/*
+ * EncryptionOperationsTest.AesGcmDifferentNonces
+ *
+ * Verifies that encrypting the same data with different nonces produces different outputs.
+ */
+TEST_F(EncryptionOperationsTest, AesGcmDifferentNonces) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .AesEncryptionKey(128)
+ .Authorization(TAG_BLOCK_MODE, BlockMode::GCM)
+ .Padding(PaddingMode::NONE)
+ .Authorization(TAG_MIN_MAC_LENGTH, 128)
+ .Authorization(TAG_CALLER_NONCE)));
+
+ string aad = "foobar";
+ string message = "123456789012345678901234567890123456";
+ string nonce1 = "000000000000";
+ string nonce2 = "111111111111";
+ string nonce3 = "222222222222";
+
+ string ciphertext1 =
+ EncryptMessage(message, BlockMode::GCM, PaddingMode::NONE, 128, HidlBuf(nonce1));
+ string ciphertext2 =
+ EncryptMessage(message, BlockMode::GCM, PaddingMode::NONE, 128, HidlBuf(nonce2));
+ string ciphertext3 =
+ EncryptMessage(message, BlockMode::GCM, PaddingMode::NONE, 128, HidlBuf(nonce3));
+
+ ASSERT_NE(ciphertext1, ciphertext2);
+ ASSERT_NE(ciphertext1, ciphertext3);
+ ASSERT_NE(ciphertext2, ciphertext3);
+}
+
+/*
* EncryptionOperationsTest.AesGcmTooShortTag
*
* Verifies that AES GCM mode fails correctly when a too-short tag length is specified.
@@ -4456,6 +4577,84 @@
EXPECT_EQ(result, std::make_pair(ErrorCode::OK, HidlBuf()));
}
+
+using ClearOperationsTest = KeymasterHidlTest;
+
+/*
+ * ClearSlotsTest.TooManyOperations
+ *
+ * Verifies that TOO_MANY_OPERATIONS is returned after the max number of
+ * operations are started without being finished or aborted. Also verifies
+ * that aborting the operations clears the operations.
+ *
+ */
+TEST_F(ClearOperationsTest, TooManyOperations) {
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .RsaEncryptionKey(2048, 65537)
+ .Padding(PaddingMode::NONE)));
+
+ auto params = AuthorizationSetBuilder().Padding(PaddingMode::NONE);
+ int max_operations = SecLevel() == SecurityLevel::STRONGBOX ? 4 : 16;
+ OperationHandle op_handles[max_operations];
+ AuthorizationSet out_params;
+ for(int i=0; i<max_operations; i++) {
+ EXPECT_EQ(ErrorCode::OK, Begin(KeyPurpose::ENCRYPT, key_blob_, params, &out_params, &(op_handles[i])));
+ }
+ EXPECT_EQ(ErrorCode::TOO_MANY_OPERATIONS,
+ Begin(KeyPurpose::ENCRYPT, key_blob_, params, &out_params, &op_handle_));
+ // Try again just in case there's a weird overflow bug
+ EXPECT_EQ(ErrorCode::TOO_MANY_OPERATIONS,
+ Begin(KeyPurpose::ENCRYPT, key_blob_, params, &out_params, &op_handle_));
+ for(int i=0; i<max_operations; i++) {
+ EXPECT_EQ(ErrorCode::OK, Abort(op_handles[i]));
+ }
+ EXPECT_EQ(ErrorCode::OK,
+ Begin(KeyPurpose::ENCRYPT, key_blob_, params, &out_params, &op_handle_));
+ AbortIfNeeded();
+}
+
+/*
+ * ClearSlotsTest.ServiceDeath
+ *
+ * Verifies that the service is restarted after death and the ongoing
+ * operations are cleared.
+ */
+TEST_F(ClearOperationsTest, ServiceDeath) {
+
+ ASSERT_EQ(ErrorCode::OK, GenerateKey(AuthorizationSetBuilder()
+ .Authorization(TAG_NO_AUTH_REQUIRED)
+ .RsaEncryptionKey(2048, 65537)
+ .Padding(PaddingMode::NONE)));
+
+ auto params = AuthorizationSetBuilder().Padding(PaddingMode::NONE);
+ int max_operations = SecLevel() == SecurityLevel::STRONGBOX ? 4 : 16;
+ OperationHandle op_handles[max_operations];
+ AuthorizationSet out_params;
+    for (int i = 0; i < max_operations; i++) {
+        EXPECT_EQ(ErrorCode::OK,
+                  Begin(KeyPurpose::ENCRYPT, key_blob_, params, &out_params, &op_handles[i]));
+    }
+ EXPECT_EQ(ErrorCode::TOO_MANY_OPERATIONS,
+ Begin(KeyPurpose::ENCRYPT, key_blob_, params, &out_params, &op_handle_));
+
+ DebugInfo debug_info;
+ GetDebugInfo(&debug_info);
+ kill(debug_info.pid, SIGKILL);
+ // wait 1 second for keymaster to restart
+ sleep(1);
+ InitializeKeymaster();
+
+    for (int i = 0; i < max_operations; i++) {
+        EXPECT_EQ(ErrorCode::OK,
+                  Begin(KeyPurpose::ENCRYPT, key_blob_, params, &out_params, &op_handles[i]));
+    }
+ EXPECT_EQ(ErrorCode::TOO_MANY_OPERATIONS,
+ Begin(KeyPurpose::ENCRYPT, key_blob_, params, &out_params, &op_handle_));
+    for (int i = 0; i < max_operations; i++) {
+        EXPECT_EQ(ErrorCode::OK, Abort(op_handles[i]));
+    }
+}
+
+
} // namespace test
} // namespace V4_0
} // namespace keymaster
diff --git a/light/2.0/vts/functional/Android.bp b/light/2.0/vts/functional/Android.bp
index 9f03d27..2c0a08f 100644
--- a/light/2.0/vts/functional/Android.bp
+++ b/light/2.0/vts/functional/Android.bp
@@ -19,6 +19,6 @@
defaults: ["VtsHalTargetTestDefaults"],
srcs: ["VtsHalLightV2_0TargetTest.cpp"],
static_libs: ["android.hardware.light@2.0"],
- test_suites: ["general-tests"],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/light/utils/main.cpp b/light/utils/main.cpp
index d07e799..b834132 100644
--- a/light/utils/main.cpp
+++ b/light/utils/main.cpp
@@ -25,7 +25,7 @@
std::cerr << msg << std::endl;
}
-int main() {
+int main(int argc, char* argv[]) {
using ::android::hardware::hidl_vec;
using ::android::hardware::light::V2_0::Brightness;
using ::android::hardware::light::V2_0::Flash;
@@ -41,10 +41,29 @@
return -1;
}
- const static LightState off = {
- .color = 0u, .flashMode = Flash::NONE, .brightnessMode = Brightness::USER,
+ static LightState off = {
+ .color = 0u,
+ .flashMode = Flash::NONE,
+ .brightnessMode = Brightness::USER,
};
+ if (argc > 2) {
+ error("Usage: blank_screen [color]");
+ return -1;
+ }
+
+ if (argc > 1) {
+ char* col_ptr;
+ unsigned int col_new;
+
+ col_new = strtoul(argv[1], &col_ptr, 0);
+ if (*col_ptr != '\0') {
+ error("Failed to convert " + std::string(argv[1]) + " to number");
+ return -1;
+ }
+ off.color = col_new;
+ }
+
service->getSupportedTypes([&](const hidl_vec<Type>& types) {
for (Type type : types) {
Status ret = service->setLight(type, off);
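With this change the tool accepts an optional color argument: strtoul is called with base 0, so both decimal and 0x-prefixed hexadecimal values are accepted, and the parsed value replaces the default of 0 in the LightState passed to every supported light. For example, running blank_screen 0xff101010 would presumably set each light to that value rather than turning it off (reading it as 0xAARRGGBB is the light HAL's usual convention, which the diff itself does not restate), while running blank_screen with no argument keeps the old blank-everything behavior.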
diff --git a/neuralnetworks/1.0/types.hal b/neuralnetworks/1.0/types.hal
index 02db063..ba9d068 100644
--- a/neuralnetworks/1.0/types.hal
+++ b/neuralnetworks/1.0/types.hal
@@ -25,25 +25,24 @@
* with at least one dimension). Types not prefaced by TENSOR_* represent
* scalar values and must have no dimensions.
*
- * Although many types are defined, most operators accept just a few
+ * Although we define many types, most operators accept just a few
* types. Most used are {@link OperandType::TENSOR_FLOAT32},
* {@link OperandType::TENSOR_QUANT8_ASYMM},
* and {@link OperandType::INT32}.
*/
enum OperandType : int32_t {
/** A 32 bit floating point scalar value. */
- FLOAT32 = 0,
+ FLOAT32 = 0,
/** A signed 32 bit integer scalar value. */
- INT32 = 1,
+ INT32 = 1,
/** An unsigned 32 bit integer scalar value. */
- UINT32 = 2,
-
+ UINT32 = 2,
/** A tensor of 32 bit floating point values. */
- TENSOR_FLOAT32 = 3,
+ TENSOR_FLOAT32 = 3,
/** A tensor of 32 bit integer values. */
- TENSOR_INT32 = 4,
+ TENSOR_INT32 = 4,
/**
- * A tensor of 8 bit integers that represent real numbers.
+ * A tensor of 8 bit unsigned integers that represent real numbers.
*
* Attached to this tensor are two numbers that can be used to convert the
* 8 bit integer to the real value and vice versa. These two numbers are:
@@ -51,21 +50,21 @@
* - zeroPoint: a 32 bit integer, in range [0, 255].
*
* The formula is:
- * real_value = (integer_value - zeroPoint) * scale.
+ * real_value = (integer_value - zeroPoint) * scale.
*/
TENSOR_QUANT8_ASYMM = 5,
/**
- * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
- * OEM operation and data types.
+ * DEPRECATED. Since HAL version 1.2, extensions are the preferred
+ * alternative to OEM operation and data types.
*
* OEM specific scalar value.
*/
OEM = 10000,
/**
- * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
- * OEM operation and data types.
+ * DEPRECATED. Since HAL version 1.2, extensions are the preferred
+ * alternative to OEM operation and data types.
*
* A tensor of OEM specific values.
*/
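As a concrete reading of the TENSOR_QUANT8_ASYMM formula restated a few lines above (real_value = (integer_value - zeroPoint) * scale), the standalone sketch below shows the dequantize/quantize round trip; the helper names are illustrative, not HAL API.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Dequantize one element per the formula quoted above.
float dequantize(uint8_t q, float scale, int32_t zeroPoint) {
    return (static_cast<int32_t>(q) - zeroPoint) * scale;
}

// Inverse mapping, rounded and clamped to the valid uint8 range [0, 255].
uint8_t quantize(float real, float scale, int32_t zeroPoint) {
    const int32_t q = static_cast<int32_t>(std::round(real / scale)) + zeroPoint;
    return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}

// Example: with scale = 0.5 and zeroPoint = 128, the byte 130 represents 1.0f.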
@@ -78,7 +77,6 @@
* The type of an operation in a model.
*/
enum OperationType : int32_t {
-
/**
* Adds two tensors, element-wise.
*
@@ -110,14 +108,16 @@
* * 0: A tensor.
* * 1: A tensor of the same {@link OperandType}, and compatible dimensions
* as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from input0's scale and zeroPoint.
* * 2: An {@link OperandType::INT32} scalar, and has to be one of the
* {@link FusedActivationFunc} values. Specifies the activation to
* invoke on the result.
*
* Outputs:
* * 0: The sum, a tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
ADD = 0,
@@ -187,8 +187,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth].
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
AVERAGE_POOL_2D = 1,
@@ -206,22 +206,23 @@
*
* Inputs:
* * 0 ~ n-1: The list of n input tensors, of shape
- * [D0, D1, ..., Daxis(i), ..., Dm]. For inputs of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, all input tensors
- * must have the same scale and zeroPoint.
+ * [D0, D1, ..., Daxis(i), ..., Dm].
+ * All input tensors of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * must have the same scale and zeroPoint as the output tensor.
* * n: An {@link OperandType::INT32} scalar, specifying the
* concatenation axis.
*
* Outputs:
* * 0: The output, a tensor of the same {@link OperandType} as the input
* tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor, the scale and zeroPoint
+ * values must be the same as the input tensors'.
*/
CONCATENATION = 2,
/**
- * Performs an 2-D convolution operation.
+ * Performs a 2-D convolution operation.
*
* The CONV_2D op sweeps a 2-D filter that can mix channels together over a
* batch of images, applying the filter to each window of each image of the
@@ -238,11 +239,17 @@
* filter[channel, di, dj, k]
* ) + bias[channel]
*
- * Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor {@link OperandType} configurations:
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
*
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+ * and Channels) data layout.
*
* Both explicit padding and implicit padding are supported.
*
@@ -252,12 +259,12 @@
* * 1: A 4-D tensor, of shape
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter.
- * * 2: A 1-D tensor, of shape [depth_out], specifying the bias.
- * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
- * should also be of {@link OperandType::TENSOR_FLOAT32}. For input
- * tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias
- * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
- * 0 and bias_scale == input_scale * filter_scale.
+ * * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}, the bias must be
+ * of the same type. For filter tensor of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be of
+ * {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 4: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -281,11 +288,11 @@
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
- * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
- * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
- * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale.
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}, the bias must be
+ * of the same type. For filter tensor of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be of
+ * {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -299,11 +306,9 @@
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. For output tensor of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
- * must be satisfied: output_scale > input_scale * filter_scale.
- *
- * Available since API level 27.
+ * [batches, out_height, out_width, depth_out].
+ * For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied: output_scale > input_scale * filter_scale.
*/
CONV_2D = 3,
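A small numeric illustration of the two quantized CONV_2D constraints documented above (bias carried as TENSOR_INT32 with zeroPoint 0 and bias_scale == input_scale * filter_scale, and output_scale strictly greater than input_scale * filter_scale); the concrete values are made up for illustration.

// Hypothetical quantization parameters for a quantized CONV_2D.
constexpr float kInputScale = 0.02f;
constexpr float kFilterScale = 0.05f;

// The TENSOR_INT32 bias operand must use zeroPoint 0 and exactly this scale.
constexpr float kBiasScale = kInputScale * kFilterScale;  // 0.001f

// Any valid output scale must be strictly larger than input_scale * filter_scale.
constexpr float kOutputScale = 0.0015f;
static_assert(kOutputScale > kInputScale * kFilterScale,
              "output_scale must exceed input_scale * filter_scale");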
@@ -329,11 +334,17 @@
* filter[1, di, dj, k * channel_multiplier + q]
* ) + bias[k * channel_multiplier + q]
*
- * Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * Supported tensor {@link OperandType} configurations:
+ * * 32 bit floating point:
+ * * * {@link OperandType::TENSOR_FLOAT32} for input, filter, output, and bias.
*
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * * Quantized:
+ * * * {@link OperandType::TENSOR_QUANT8_ASYMM} for input, filter, and output.
+ * * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
+ * * * input.scale * filter.scale).
+ *
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+ * and Channels) data layout.
*
* Both explicit padding and implicit padding are supported.
*
@@ -343,11 +354,11 @@
* * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
* specifying the filter.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
- * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
- * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
- * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale.
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}, the bias must be
+ * of the same type. For filter tensor of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be of
+ * {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 4: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -372,11 +383,11 @@
* * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
* specifying the filter.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
- * tensor of {@link OperandType::TENSOR_FLOAT32}, the bias should
- * also be of {@link OperandType::TENSOR_FLOAT32}. For input tensor
- * of {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be
- * of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
- * bias_scale == input_scale * filter_scale.
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}, the bias must be
+ * of the same type. For filter tensor of
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the bias should be of
+ * {@link OperandType::TENSOR_INT32}, with zeroPoint of 0 and
+ * bias_scale == input_scale * filter_scale.
* * 3: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -392,11 +403,10 @@
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. For output tensor of
- * {@link OperandType::TENSOR_QUANT8_ASYMM}, the following condition
- * must be satisfied: output_scale > input_scale * filter_scale.
- *
- * Available since API level 27.
+ * [batches, out_height, out_width, depth_out]. For
+ * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied:
+ * output_scale > input_scale * filter_scale.
*/
DEPTHWISE_CONV_2D = 4,
@@ -419,7 +429,8 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+ * and Channels) data layout.
*
* Inputs:
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
@@ -431,8 +442,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape [batch, height*block_size,
* width*block_size, depth/(block_size*block_size)].
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
DEPTH_TO_SPACE = 5,
@@ -443,19 +454,19 @@
*
* output = (input - zeroPoint) * scale.
*
- * Supported tensor {@link OperandType}:
+ * Supported input tensor {@link OperandType}:
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
+ * Supported output tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT32}.
+ *
* Supported tensor rank: up to 4
*
* Inputs:
- * * 0: A tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}.
+ * * 0: A tensor.
*
* Outputs:
- * * 0: The output tensor of same shape as input0, but with
- * {@link OperandType::TENSOR_FLOAT32}.
- *
- * Available since API level 27.
+ * * 0: A tensor with the same shape as input0.
*/
DEQUANTIZE = 6,
@@ -479,6 +490,13 @@
* If a value in Lookups is out of bounds, the operation must fail
* and an error must be reported.
*
+ * Supported value tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported value tensor rank: from 2
+ *
* Inputs:
* * 0: Lookups. A 1-D tensor of {@link OperandType::TENSOR_INT32}.
* The values are indices into the first dimension of Values.
@@ -489,8 +507,8 @@
* * 0: A n-D tensor with the same rank and shape as the Values
* tensor, except for the first dimension which has the same size
* as Lookups' only dimension.
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input1.
*/
EMBEDDING_LOOKUP = 7,
@@ -508,8 +526,6 @@
* Outputs:
* * 0: The output tensor, of the same {@link OperandType} and dimensions as
* the input tensor.
- *
- * Available since API level 27.
*/
FLOOR = 8,
@@ -549,12 +565,9 @@
* invoke on the result.
*
* Outputs:
- * * 0: The output tensor, of shape [batch_size, num_units]. For output
- * tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
- * condition must be satisfied:
- * output_scale > input_scale * filter_scale.
- *
- * Available since API level 27.
+ * * 0: The output tensor, of shape [batch_size, num_units]. For
+ * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
+ * condition must be satisfied: output_scale > input_scale * filter_scale.
*/
FULLY_CONNECTED = 9,
@@ -585,6 +598,13 @@
* must be selected. If no entry in Keys has 123456, a slice of zeroes
* must be concatenated.
*
+ * Supported value tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported value tensor rank: from 2
+ *
* Inputs:
* * 0: Lookups. A 1-D {@link OperandType::TENSOR_INT32} tensor with
* shape [ k ].
@@ -598,13 +618,13 @@
*
* Outputs:
* * 0: Output. A tensor with shape [ k …].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input2.
* * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
* hits (True) or not (False).
* Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
* and scale 1.0f.
* A non-zero byte represents True, a hit. A zero indicates otherwise.
- *
- * Available since API level 27.
*/
HASHTABLE_LOOKUP = 10,
@@ -617,9 +637,6 @@
* input[batch, row, col, channel] /
* sqrt(sum_{c} pow(input[batch, row, col, c], 2))
*
- * For input tensor with more dimensions, independently normalizes each 1-D
- * slice along dimension dim.
- *
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
*
@@ -627,13 +644,10 @@
* Height, Width, and Channels).
*
* Inputs:
- * * 0: A 4-D tensor, of shape [batches, height, width, depth].
+ * * 0: A 4-D tensor, specifying the tensor to be normalized.
*
* Outputs:
- * * 0: The output 4-D tensor, of the same shape as input
- * [batches, height, width, depth].
- *
- * Available since API level 27.
+ * * 0: A tensor of the same {@link OperandType} and same shape as input0.
*/
L2_NORMALIZATION = 11,
@@ -652,7 +666,8 @@
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
*
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+ * and Channels) data layout.
*
* Both explicit padding and implicit padding are supported.
*
@@ -700,8 +715,6 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth].
- *
- * Available since API level 27.
*/
L2_POOL_2D = 12,
@@ -729,17 +742,18 @@
* the input.
* * 1: An {@link OperandType::INT32} scalar, specifying the radius of
* the normalization window.
- * * 2: An {@link OperandType::FLOAT32} scalar, specifying the bias, must
- * not be zero.
- * * 3: An {@link OperandType::FLOAT32} scalar, specifying the scale
- * factor, alpha.
- * * 4: An {@link OperandType::FLOAT32} scalar, specifying the exponent,
- * beta.
+ * * 2: A scalar, specifying the bias, must not be zero.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
+ * value must be of {@link OperandType::FLOAT32}.
+ * * 3: A scalar, specifying the scale factor, alpha.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
+ * alpha value must be of {@link OperandType::FLOAT32}.
+ * * 4: A scalar, specifying the exponent, beta.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the beta
+ * value must be of {@link OperandType::FLOAT32}.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 27.
*/
LOCAL_RESPONSE_NORMALIZATION = 13,
@@ -763,45 +777,53 @@
* * 0: The output tensor of same shape as input0.
* For {@link OperandType::TENSOR_QUANT8_ASYMM},
* the scale must be 1.f / 256 and the zeroPoint must be 0.
- *
- * Available since API level 27.
*/
LOGISTIC = 14,
/**
 * Projects an input to a bit vector via locality sensitive hashing.
*
+ * Supported input tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_INT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
+ * Supported input tensor rank: from 1
+ *
* Inputs:
* * 0: Hash functions. Dim.size == 2, DataType: Float.
- * Tensor[0].Dim[0]: Number of hash functions.
- * Tensor[0].Dim[1]: Number of seeds per hash functions.
- * Tensor[0].Dim[1] <= 32 in sparse case.
+ * Tensor[0].Dim[0]: Number of hash functions.
+ * Tensor[0].Dim[1]: Number of projected output bits generated by each
+ * hash function.
+ * If the projection type is Sparse:
+ * Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
*
* * 1: Input. Dim.size >= 1, no restriction on DataType.
* * 2: Weight. Optional. Dim.size == 1, DataType: Float.
- * If not set, each input element is considered to have the same weight
- * of 1.0.
- * Tensor[1].Dim[0] == Tensor[2].Dim[0]
+ * If not set, each input element is considered to have the same weight
+ * of 1.0.
+ * Tensor[1].Dim[0] == Tensor[2].Dim[0]
* * 3: Type:
- * Sparse: Value LSHProjectionType_SPARSE(=1).
+ * Sparse:
+ * Value LSHProjectionType_SPARSE(=1).
* Computed bit vector is considered to be sparse.
* Each output element is an int32 made up of multiple bits
* computed from hash functions.
*
- * Dense: Value LSHProjectionType_DENSE(=2).
+ * Dense:
+ * Value LSHProjectionType_DENSE(=2).
* Computed bit vector is considered to be dense. Each output
* element represents a bit and can take the value of either
* 0 or 1.
*
* Outputs:
- * * 0: If the projection type is sparse:
- * Output.Dim == { Tensor[0].Dim[0] }
- * A tensor of int32 that represents hash signatures.
- * If the projection type is Dense:
- * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
- * A flattened tensor that represents projected bit vectors.
+ * * 0: If the projection type is Sparse:
+ * Output.Dim == { Tensor[0].Dim[0] }
+ * A tensor of int32 that represents hash signatures.
*
- * Available since API level 27.
+ * If the projection type is Dense:
+ * Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
+ * A flattened tensor that represents projected bit vectors.
*/
LSH_PROJECTION = 15,
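The Sparse-mode size constraint spelled out above (Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32) is easy to sanity-check; the helper below is an illustrative sketch, not driver or test code.

#include <cmath>
#include <cstdint>

// Checks the Sparse LSH_PROJECTION constraint quoted above:
//   Tensor[0].Dim[1] + ceil(log2(Tensor[0].Dim[0])) <= 32
bool sparseProjectionFits(uint32_t numHashFunctions, uint32_t bitsPerFunction) {
    const auto indexBits = static_cast<uint32_t>(
            std::ceil(std::log2(static_cast<double>(numHashFunctions))));
    return bitsPerFunction + indexBits <= 32;
}

// Example: with 8 hash functions, ceil(log2(8)) = 3, so each hash function may
// contribute at most 29 projected bits to its packed int32 output element.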
@@ -901,71 +923,54 @@
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
*
+ * All input and output tensors must be of the same type.
+ *
* Inputs:
* * 0: The input (\f$x_t\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, input_size], where “batch_size” corresponds to the
- * batching dimension, and “input_size” is the size of the input.
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
* * 1: The input-to-input weights (\f$W_{xi}\f$). Optional.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, input_size], where “num_units” corresponds to the
- * number of cell units.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of cell units.
* * 2: The input-to-forget weights (\f$W_{xf}\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, input_size].
+ * A 2-D tensor of shape [num_units, input_size].
* * 3: The input-to-cell weights (\f$W_{xc}\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, input_size].
+ * A 2-D tensor of shape [num_units, input_size].
* * 4: The input-to-output weights (\f$W_{xo}\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, input_size].
+ * A 2-D tensor of shape [num_units, input_size].
* * 5: The recurrent-to-input weights (\f$W_{hi}\f$). Optional.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, output_size], where “output_size” corresponds to either
- * the number of cell units (i.e., “num_units”), or the second
- * dimension of the “projection_weights”, if defined.
+ * A 2-D tensor of shape [num_units, output_size], where “output_size”
+ * corresponds to either the number of cell units (i.e., “num_units”),
+ * or the second dimension of the “projection_weights”, if defined.
* * 6: The recurrent-to-forget weights (\f$W_{hf}\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, output_size].
+ * A 2-D tensor of shape [num_units, output_size].
* * 7: The recurrent-to-cell weights (\f$W_{hc}\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, output_size].
+ * A 2-D tensor of shape [num_units, output_size].
* * 8: The recurrent-to-output weights (\f$W_{ho}\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, output_size].
+ * A 2-D tensor of shape [num_units, output_size].
* * 9: The cell-to-input weights (\f$W_{ci}\f$). Optional.
- * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units].
+ * A 1-D tensor of shape [num_units].
* * 10:The cell-to-forget weights (\f$W_{cf}\f$). Optional.
- * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units].
+ * A 1-D tensor of shape [num_units].
* * 11:The cell-to-output weights (\f$W_{co}\f$). Optional.
- * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units].
+ * A 1-D tensor of shape [num_units].
* * 12:The input gate bias (\f$b_i\f$). Optional.
- * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units].
+ * A 1-D tensor of shape [num_units].
* * 13:The forget gate bias (\f$b_f\f$).
- * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units].
+ * A 1-D tensor of shape [num_units].
* * 14:The cell bias (\f$b_c\f$).
- * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units].
+ * A 1-D tensor of shape [num_units].
* * 15:The output gate bias (\f$b_o\f$).
- * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units].
+ * A 1-D tensor of shape [num_units].
* * 16:The projection weights (\f$W_{proj}\f$). Optional.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [output_size, num_units].
+ * A 2-D tensor of shape [output_size, num_units].
* * 17:The projection bias (\f$b_{proj}\f$). Optional.
- * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [output_size].
+ * A 1-D tensor of shape [output_size].
* * 18:The output state (in) (\f$h_{t-1}\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, output_size].
+ * A 2-D tensor of shape [batch_size, output_size].
* * 19:The cell state (in) (\f$C_{t-1}\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, num_units].
* * 20:The activation function (\f$g\f$).
* A value indicating the activation function:
* <ul>
@@ -984,21 +989,15 @@
*
* Outputs:
* * 0: The scratch buffer.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, num_units * 3] with CIFG, or
+ * A 2-D tensor of shape [batch_size, num_units * 3] with CIFG, or
* [batch_size, num_units * 4] without CIFG.
* * 1: The output state (out) (\f$h_t\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, output_size].
+ * A 2-D tensor of shape [batch_size, output_size].
* * 2: The cell state (out) (\f$C_t\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, num_units].
* * 3: The output (\f$o_t\f$).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, output_size]. This is effectively the same as the
- * current “output state (out)” value.
- *
- * Available since API level 27.
+ * A 2-D tensor of shape [batch_size, output_size]. This is effectively
+ * the same as the current “output state (out)” value.
*/
LSTM = 16,
@@ -1019,7 +1018,8 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+ * and Channels) data layout.
*
* Both explicit padding and implicit padding are supported.
*
@@ -1067,8 +1067,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth].
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
MAX_POOL_2D = 17,
@@ -1106,8 +1106,6 @@
* For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the following condition must be satisfied:
* output_scale > input1_scale * input2_scale.
- *
- * Available since API level 27.
*/
MUL = 18,
@@ -1129,8 +1127,8 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
RELU = 19,
@@ -1151,9 +1149,9 @@
* * 0: A tensor, specifying the input.
*
* Outputs:
- * * 0: The output tensor of same shape as input0.
- *
- * Available since API level 27.
+ * * 0: The output tensor of the same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
RELU1 = 20,
@@ -1175,8 +1173,8 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
RELU6 = 21,
@@ -1205,8 +1203,8 @@
*
* Outputs:
* * 0: The output tensor, of shape specified by the input shape.
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
RESHAPE = 22,
@@ -1220,9 +1218,10 @@
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
*
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+ * and Channels) data layout.
*
- * Inputs:
+ * Inputs (resizing by shape):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
* the input.
* * 1: An {@link OperandType::INT32} scalar, specifying the output
@@ -1233,8 +1232,6 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
- *
- * Available since API level 27.
*/
RESIZE_BILINEAR = 23,
@@ -1257,25 +1254,23 @@
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
*
+ * The input tensors must all be the same type.
+ *
* Inputs:
* * 0: input.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32} of shape
- * [batch_size, input_size], where “batch_size” corresponds to the
- * batching dimension, and “input_size” is the size of the input.
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
* * 1: weights.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, input_size], where “num_units” corresponds to the
- * number of units.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of units.
* * 2: recurrent_weights.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, num_units], with columns corresponding to the weights
- * from each unit.
+ * A 2-D tensor of shape [num_units, num_units], with columns
+ * corresponding to the weights from each unit.
* * 3: bias.
- * A 1-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units].
+ * A 1-D tensor of shape [num_units].
* * 4: hidden state (in).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, num_units].
* * 5: fused_activation_function.
* An optional {@link FusedActivationFunc} value indicating the
* activation function. If “NONE” is specified then it results in a
@@ -1283,15 +1278,11 @@
*
* Outputs:
* * 0: hidden state (out).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, num_units].
+ * A 2-D tensor of shape [batch_size, num_units].
*
* * 1: output.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, num_units]. This is effectively the same as the
- * current state value.
- *
- * Available since API level 27.
+ * A 2-D tensor of shape [batch_size, num_units]. This is effectively
+ * the same as the current state value.
*/
RNN = 24,
@@ -1306,6 +1297,9 @@
* exp((input[batch, i] - max(input[batch, :])) * beta) /
* sum_{k}{exp((input[batch, k] - max(input[batch, :])) * beta)}
*
+ * For input tensor with rank other than 2, the activation will be applied
+ * independently on each 1-D slice along the specified dimension.
+ *
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
@@ -1314,15 +1308,15 @@
*
* Inputs:
* * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
- * * 1: An {@link OperandType::FLOAT32} scalar, specifying the positive
- * scaling factor for the exponent, beta.
+ * * 1: A scalar, specifying the positive scaling factor for the exponent,
+ * beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
+ * {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of
+ * {@link OperandType::FLOAT32}.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
* For {@link OperandType::TENSOR_QUANT8_ASYMM},
* the scale must be 1.f / 256 and the zeroPoint must be 0.
- *
- * Available since API level 27.
*/
SOFTMAX = 25,
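For reference, a direct transcription of the SOFTMAX formula documented above for a single non-empty float slice, with the max-subtraction that the formula already builds in for numerical stability; this is an illustrative sketch, not reference or driver code.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// out[i] = exp((x[i] - max(x)) * beta) / sum_k exp((x[k] - max(x)) * beta)
// Assumes a non-empty input slice.
std::vector<float> softmaxSlice(const std::vector<float>& x, float beta) {
    const float maxVal = *std::max_element(x.begin(), x.end());
    std::vector<float> out(x.size());
    float sum = 0.0f;
    for (size_t i = 0; i < x.size(); ++i) {
        out[i] = std::exp((x[i] - maxVal) * beta);
        sum += out[i];
    }
    for (float& v : out) v /= sum;
    return out;
}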
@@ -1344,7 +1338,8 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
- * Supported tensor rank: 4, with "NHWC" data layout.
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+ * and Channels) data layout.
*
* Inputs:
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
@@ -1356,8 +1351,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape [batches, height/block_size,
* width/block_size, depth_in*block_size*block_size].
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
SPACE_TO_DEPTH = 26,
@@ -1403,25 +1398,23 @@
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
*
+ * All input tensors must be the same type.
+ *
* Inputs:
* * 0: input.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, input_size], where “batch_size” corresponds to the
- * batching dimension, and “input_size” is the size of the input.
+ * A 2-D tensor of shape [batch_size, input_size], where “batch_size”
+ * corresponds to the batching dimension, and “input_size” is the size
+ * of the input.
* * 1: weights_feature.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, input_size], where “num_units” corresponds to the
- * number of units.
+ * A 2-D tensor of shape [num_units, input_size], where “num_units”
+ * corresponds to the number of units.
* * 2: weights_time.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [num_units, memory_size], where “memory_size” corresponds to the
- * fixed-size of the memory.
+ * A 2-D tensor of shape [num_units, memory_size], where “memory_size”
+ * corresponds to the fixed size of the memory.
* * 3: bias.
- * An optional 1-D tensor of {@link OperandType::TENSOR_FLOAT32},
- * of shape [num_units].
+ * An optional 1-D tensor of shape [num_units].
* * 4: state (in).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
- * [batch_size, (memory_size - 1) * num_units * rank].
+ * A 2-D tensor of shape [batch_size, (memory_size - 1) * num_units * rank].
* * 5: rank.
* The rank of the SVD approximation.
* * 6: fused_activation_function.
@@ -1431,13 +1424,11 @@
*
* Outputs:
* * 0: state (out).
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+ * A 2-D tensor of the same {@link OperandType} as the inputs, with shape
* [batch_size, (memory_size - 1) * num_units * rank].
* * 1: output.
- * A 2-D tensor of {@link OperandType::TENSOR_FLOAT32}, of shape
+ * A 2-D tensor of the same {@link OperandType} as the inputs, with shape
* [batch_size, num_units].
- *
- * Available since API level 27.
*/
SVDF = 27,
@@ -1458,8 +1449,6 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 27.
*/
TANH = 28,
diff --git a/neuralnetworks/1.0/types.t b/neuralnetworks/1.0/types.t
new file mode 100644
index 0000000..d7b26aa
--- /dev/null
+++ b/neuralnetworks/1.0/types.t
@@ -0,0 +1,431 @@
+%% template file for generating types.hal.
+%% see frameworks/ml/nn/tools/api/README.md.
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.0;
+
+%insert Operand_1.0_Comment
+enum OperandType : int32_t {
+%insert Operand_1.0
+
+ /**
+ * DEPRECATED. Since HAL version 1.2, extensions are the preferred
+ * alternative to OEM operation and data types.
+ *
+ * OEM specific scalar value.
+ */
+ OEM = 10000,
+
+ /**
+ * DEPRECATED. Since HAL version 1.2, extensions are the preferred
+ * alternative to OEM operation and data types.
+ *
+ * A tensor of OEM specific values.
+ */
+ TENSOR_OEM_BYTE = 10001,
+};
+
+%insert Operation_1.0_Comment
+enum OperationType : int32_t {
+%insert Operation_1.0
+
+ /**
+ * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
+ * OEM operation and data types.
+ *
+ * This operation is OEM specific. It should only be used for OEM
+ * applications.
+ */
+ OEM_OPERATION = 10000,
+};
+
+/**
+ * Fused activation function types.
+ */
+enum FusedActivationFunc : int32_t {
+ NONE = 0,
+ RELU = 1,
+ RELU1 = 2,
+ RELU6 = 3,
+};
+
+/**
+ * How an operand is used.
+ */
+enum OperandLifeTime : int32_t {
+ /**
+ * The operand is internal to the model. It's created by an operation and
+ * consumed by other operations. It must be an output operand of
+ * exactly one operation.
+ */
+ TEMPORARY_VARIABLE,
+
+ /**
+ * The operand is an input of the model. It must not be an output
+ * operand of any operation.
+ *
+ * An operand can't be both input and output of a model.
+ */
+ MODEL_INPUT,
+
+ /**
+ * The operand is an output of the model. It must be an output
+ * operand of exactly one operation.
+ *
+ * An operand can't be both input and output of a model.
+ */
+ MODEL_OUTPUT,
+
+ /**
+ * The operand is a constant found in Model.operandValues. It must
+ * not be an output operand of any operation.
+ */
+ CONSTANT_COPY,
+
+ /**
+ * The operand is a constant that was specified via a Memory
+ * object. It must not be an output operand of any operation.
+ */
+ CONSTANT_REFERENCE,
+
+ /**
+ * The operand does not have a value. This is valid only for optional
+ * arguments of operations.
+ */
+ NO_VALUE,
+};
+
+/**
+ * Status of a device.
+ */
+enum DeviceStatus : int32_t {
+ AVAILABLE,
+ BUSY,
+ OFFLINE,
+ UNKNOWN,
+};
+
+/**
+ * Performance information for the reference workload.
+ *
+ * Used by a driver to report its performance characteristics.
+ */
+struct PerformanceInfo {
+ /**
+ * Ratio of the time taken by the driver to execute the
+ * workload compared to the time the CPU would take for the
+ * same workload. A lower number is better.
+ */
+ float execTime;
+
+ /**
+ * Ratio of the energy used by the driver compared to what
+ * the CPU would use for doing the same workload. A lower number
+ * is better.
+ */
+ float powerUsage;
+};
+
+/**
+ * The capabilities of a driver.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data.
+ */
+ PerformanceInfo float32Performance;
+
+ /**
+ * Driver performance when operating on asymmetric 8-bit quantized data.
+ */
+ PerformanceInfo quantized8Performance;
+};
+
+/**
+ * Describes the location of a data object.
+ */
+struct DataLocation {
+ /**
+ * The index of the memory pool where this location is found.
+ */
+ uint32_t poolIndex;
+
+ /**
+ * Offset in bytes from the start of the pool.
+ */
+ uint32_t offset;
+
+ /**
+ * The length of the data in bytes.
+ */
+ uint32_t length;
+};
+
+/**
+ * Describes one operand of the model's graph.
+ */
+struct Operand {
+ /**
+ * Data type of the operand.
+ */
+ OperandType type;
+
+ /**
+ * Dimensions of the operand.
+ *
+ * For a scalar operand, dimensions.size() must be 0.
+ *
+ * For a tensor operand, dimensions.size() must be at least 1;
+ * however, any of the dimensions may be unspecified.
+ *
+ * A tensor operand with all dimensions specified has "fully
+ * specified" dimensions. Whenever possible (i.e., whenever the
+ * dimensions are known at model construction time), a tensor
+ * operand should have (but is not required to have) fully
+ * specified dimensions, in order to enable the best possible
+ * performance.
+ *
+ * If a tensor operand's dimensions are not fully specified, the
+ * dimensions of the operand are deduced from the operand
+ * dimensions and values of the operation for which that operand
+ * is an output.
+ *
+ * In the following situations, a tensor operand's dimensions must
+ * be fully specified:
+ *
+ * . The operand has lifetime CONSTANT_COPY or
+ * CONSTANT_REFERENCE.
+ *
+ * . The operand has lifetime MODEL_INPUT or MODEL_OUTPUT. Fully
+ * specified dimensions must either be present in the
+ * Operand or they must be provided in the corresponding
+ * RequestArgument.
+ * EXCEPTION: If the input or output is optional and omitted
+ * (by setting the hasNoValue field of the corresponding
+ * RequestArgument to true) then it need not have fully
+ * specified dimensions.
+ *
+ * A tensor operand with some number of unspecified dimensions is
+ * represented by setting each unspecified dimension to 0.
+ */
+ vec<uint32_t> dimensions;
+
+ /**
+ * The number of times this operand appears as an operation input.
+ *
+ * (For example, if this operand appears once in one operation's
+ * input list, and three times in another operation's input list,
+ * then numberOfConsumers = 4.)
+ */
+ uint32_t numberOfConsumers;
+
+ /**
+ * Quantized scale of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
+ * TENSOR_INT32.
+ */
+ float scale;
+
+ /**
+ * Quantized zero-point offset of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ */
+ int32_t zeroPoint;
+
+ /**
+ * How the operand is used.
+ */
+ OperandLifeTime lifetime;
+
+ /**
+ * Where to find the data for this operand.
+ * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
+ * NO_VALUE:
+ * - All the fields must be 0.
+ * If the lifetime is CONSTANT_COPY:
+ * - location.poolIndex is 0.
+ * - location.offset is the offset in bytes into Model.operandValues.
+ * - location.length is set.
+ * If the lifetime is CONSTANT_REFERENCE:
+ * - location.poolIndex is set.
+ * - location.offset is the offset in bytes into the specified pool.
+ * - location.length is set.
+ */
+ DataLocation location;
+};
+
+/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ */
+ OperationType type;
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> outputs;
+};
+
+/**
+ * A Neural Network Model.
+ *
+ * This includes not only the execution graph, but also constant data such as
+ * weights or scalars added at construction time. The only information that
+ * might not be known is the shape of the input tensors.
+ */
+struct Model {
+ /**
+ * All operands included in the model.
+ */
+ vec<Operand> operands;
+
+ /**
+ * All operations included in the model.
+ *
+ * The operations are sorted into execution order. Every operand
+ * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
+ * written before it is read.
+ */
+ vec<Operation> operations;
+
+ /**
+ * Input indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> inputIndexes;
+
+ /**
+ * Output indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> outputIndexes;
+
+ /**
+ * A byte buffer containing operand data that were copied into the model.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_COPY.
+ */
+ vec<uint8_t> operandValues;
+
+ /**
+ * A collection of shared memory pools containing operand values.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_REFERENCE.
+ */
+ vec<memory> pools;
+};
+
+/**
+ * Metadata information specifying the location of the input or output data and
+ * any updates to the input or output operand.
+ */
+struct RequestArgument {
+ /**
+ * If true, the argument does not have a value. This can be used for
+ * operations that take optional arguments. If true, the fields of location
+ * are set to 0 and the dimensions vector is left empty.
+ */
+ bool hasNoValue;
+
+ /**
+ * The location within one of the memory pools passed in the Request.
+ */
+ DataLocation location;
+
+ /**
+ * Updated dimension information.
+ *
+ * If dimensions.size() > 0, dimension information was provided
+ * along with the argument. This can be the case for models that
+ * accept inputs of varying size. This can't change the rank, just
+ * the value of the dimensions that were unspecified in the
+ * model. If dimensions.size() > 0, then all dimensions must be
+ * specified here; and any dimension that was specified in the
+ * model must have the same value here.
+ *
+ * If the dimensions in the model are not fully specified, then
+ * they must be fully specified here, unless hasNoValue is set to
+ * true. If the dimensions in the model are fully specified, then
+ * either dimensions.size() may be 0, or the dimensions in the
+ * model must be identical to the dimensions here.
+ */
+ vec<uint32_t> dimensions;
+};
+
+/**
+ * Inputs to be sent to and outputs to be retrieved from a prepared model.
+ *
+ * A Request serves two primary tasks:
+ * 1) Provides the input and output data to be used when executing the model.
+ * 2) Specifies any updates to the input operand metadata that were left
+ * unspecified at model preparation time.
+ *
+ * An output must not overlap with any other output, with an input, or
+ * with an operand of lifetime CONSTANT_REFERENCE.
+ */
+struct Request {
+ /**
+ * Input data and information to be used in the execution of a prepared
+ * model.
+ *
+ * The index of the input corresponds to the index in Model.inputIndexes.
+ * E.g., input[i] corresponds to Model.inputIndexes[i].
+ */
+ vec<RequestArgument> inputs;
+
+ /**
+ * Output data and information to be used in the execution of a prepared
+ * model.
+ *
+ * The index of the output corresponds to the index in Model.outputIndexes.
+ * E.g., output[i] corresponds to Model.outputIndexes[i].
+ */
+ vec<RequestArgument> outputs;
+
+ /**
+ * A collection of shared memory pools containing operand data for both the
+ * inputs and the outputs to a model.
+ */
+ vec<memory> pools;
+};
+
+/**
+ * Return status of a function.
+ */
+enum ErrorStatus : int32_t {
+ NONE,
+ DEVICE_UNAVAILABLE,
+ GENERAL_FAILURE,
+ OUTPUT_INSUFFICIENT_SIZE,
+ INVALID_ARGUMENT,
+};
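To tie the Operand, OperandLifeTime, and DataLocation pieces of this template together, the sketch below shows what a constant operand stored inline in Model.operandValues could look like when built host-side; the field values are hypothetical, and designated initializers are used the same way the light HAL code elsewhere in this diff uses them.

#include <android/hardware/neuralnetworks/1.0/types.h>

#include <cstdint>

using ::android::hardware::neuralnetworks::V1_0::Operand;
using ::android::hardware::neuralnetworks::V1_0::OperandLifeTime;
using ::android::hardware::neuralnetworks::V1_0::OperandType;

// Hypothetical constant INT32 bias operand whose single value lives at the
// start of Model.operandValues (lifetime CONSTANT_COPY, poolIndex 0).
const Operand kBiasOperand = {
        .type = OperandType::TENSOR_INT32,
        .dimensions = {1},
        .numberOfConsumers = 1,
        .scale = 0.001f,  // e.g. input_scale * filter_scale for a quantized conv
        .zeroPoint = 0,
        .lifetime = OperandLifeTime::CONSTANT_COPY,
        .location = {.poolIndex = 0, .offset = 0, .length = sizeof(int32_t)},
};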
diff --git a/neuralnetworks/1.0/vts/functional/Android.bp b/neuralnetworks/1.0/vts/functional/Android.bp
index 0af7f79..ba9fb45 100644
--- a/neuralnetworks/1.0/vts/functional/Android.bp
+++ b/neuralnetworks/1.0/vts/functional/Android.bp
@@ -40,10 +40,11 @@
],
}
-cc_defaults {
- name: "VtsHalNeuralNetworksV1_0TargetTestDefaults",
+cc_test {
+ name: "VtsHalNeuralnetworksV1_0TargetTest",
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
+ "BasicTests.cpp",
"TestAssertions.cpp",
"ValidateModel.cpp",
"ValidateRequest.cpp",
@@ -64,33 +65,11 @@
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
],
+ whole_static_libs: [
+ "neuralnetworks_generated_V1_0_example",
+ ],
header_libs: [
"libneuralnetworks_headers",
],
- test_suites: ["general-tests"],
-}
-
-cc_test {
- name: "VtsHalNeuralnetworksV1_0TargetTest",
- defaults: ["VtsHalNeuralNetworksV1_0TargetTestDefaults"],
- srcs: [
- "BasicTests.cpp",
- ],
- whole_static_libs: [
- "neuralnetworks_generated_V1_0_example",
- ],
-}
-
-cc_test {
- name: "PresubmitHalNeuralnetworksV1_0TargetTest",
- defaults: ["VtsHalNeuralNetworksV1_0TargetTestDefaults"],
- srcs: [
- "BasicTests.cpp",
- ],
- whole_static_libs: [
- "neuralnetworks_generated_V1_0_example",
- ],
- cflags: [
- "-DPRESUBMIT_NOT_VTS",
- ],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/neuralnetworks/1.0/vts/functional/BasicTests.cpp b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
index 551ea67..cc44c9e 100644
--- a/neuralnetworks/1.0/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.0/vts/functional/BasicTests.cpp
@@ -21,17 +21,17 @@
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
// create device test
-TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
+TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
// status test
-TEST_F(NeuralnetworksHidlTest, StatusTest) {
+TEST_P(NeuralnetworksHidlTest, StatusTest) {
Return<DeviceStatus> status = kDevice->getStatus();
ASSERT_TRUE(status.isOk());
EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}
// initialization
-TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
Return<void> ret =
kDevice->getCapabilities([](ErrorStatus status, const Capabilities& capabilities) {
EXPECT_EQ(ErrorStatus::NONE, status);
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index 1948c05..595ad85 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -148,6 +148,20 @@
checkResults(testModel, outputs);
}
+void GeneratedTestBase::SetUp() {
+ testing::TestWithParam<GeneratedTestParam>::SetUp();
+ ASSERT_NE(kDevice, nullptr);
+}
+
+std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
+ return TestModelManager::get().getTestModels(filter);
+}
+
+std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
+ const auto& [namedDevice, namedModel] = info.param;
+ return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
+}
+
// Tag for the generated tests
class GeneratedTest : public GeneratedTestBase {};
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
index 10e46b7..f230a02 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
@@ -18,29 +18,38 @@
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_GENERATED_TEST_HARNESS_H
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <functional>
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
-class GeneratedTestBase
- : public NeuralnetworksHidlTest,
- public testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
+using NamedModel = Named<const test_helper::TestModel*>;
+using GeneratedTestParam = std::tuple<NamedDevice, NamedModel>;
+
+class GeneratedTestBase : public testing::TestWithParam<GeneratedTestParam> {
protected:
- const test_helper::TestModel& kTestModel = *GetParam().second;
+ void SetUp() override;
+ const sp<IDevice> kDevice = getData(std::get<NamedDevice>(GetParam()));
+ const test_helper::TestModel& kTestModel = *getData(std::get<NamedModel>(GetParam()));
};
-#define INSTANTIATE_GENERATED_TEST(TestSuite, filter) \
- INSTANTIATE_TEST_SUITE_P( \
- TestGenerated, TestSuite, \
- testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
- [](const auto& info) { return info.param.first; })
+using FilterFn = std::function<bool(const test_helper::TestModel&)>;
+std::vector<NamedModel> getNamedModels(const FilterFn& filter);
+
+std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info);
+
+#define INSTANTIATE_GENERATED_TEST(TestSuite, filter) \
+ INSTANTIATE_TEST_SUITE_P(TestGenerated, TestSuite, \
+ testing::Combine(testing::ValuesIn(getNamedDevices()), \
+ testing::ValuesIn(getNamedModels(filter))), \
+ printGeneratedTest)
// Tag for the validation tests, instantiated in VtsHalNeuralnetworks.cpp.
// TODO: Clean up the hierarchy for ValidationTest.
class ValidationTest : public GeneratedTestBase {};
-Model createModel(const ::test_helper::TestModel& testModel);
+Model createModel(const test_helper::TestModel& testModel);
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
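For reviewers following the parameterization rework: with the rewritten macro, a generated-test suite is instantiated for every (device, model) pair, combining getNamedDevices() with the models accepted by the filter lambda. The pattern below condenses how the suites elsewhere in this diff use it; the suite and test names in the sketch are hypothetical.

// Usage sketch of the new macro (mirrors GeneratedTest/ValidationTest above).
class MyGeneratedSuite : public GeneratedTestBase {};

TEST_P(MyGeneratedSuite, IllustrativeOnly) {
    // kDevice and kTestModel come from GeneratedTestBase for the
    // (device, model) pair selected by this instantiation.
    ASSERT_NE(kDevice, nullptr);
}

INSTANTIATE_GENERATED_TEST(MyGeneratedSuite,
                           [](const test_helper::TestModel&) { return true; });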
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 307003c..5b630fd 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -117,6 +117,13 @@
return outputBuffers;
}
+std::string gtestCompliantName(std::string name) {
+ // gtest test names must only contain alphanumeric characters
+    std::replace_if(
+            name.begin(), name.end(),
+            [](unsigned char c) { return !std::isalnum(c); }, '_');
+ return name;
+}
+
} // namespace android::hardware::neuralnetworks
namespace android::hardware::neuralnetworks::V1_0 {
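The new gtestCompliantName helper exists because device and model names can contain characters such as '.', '@', ':' or '-', which are not valid in gtest test names. A quick illustration of its effect on an arbitrary example string:

#include <string>

// Illustrative only: every non-alphanumeric character becomes '_'.
const std::string raw = "android.hardware.neuralnetworks@1.0::IDevice/default";
const std::string safe = gtestCompliantName(raw);
// safe == "android_hardware_neuralnetworks_1_0__IDevice_default"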
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index 20b4565..cb22250 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -18,11 +18,13 @@
#include "VtsHalNeuralnetworks.h"
#include "1.0/Callbacks.h"
-#include "1.0/Utils.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
#include <android-base/logging.h>
+#include <hidl/ServiceManagement.h>
+#include <string>
+#include <utility>
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
@@ -76,34 +78,39 @@
ASSERT_NE(nullptr, preparedModel->get());
}
-// A class for test environment setup
-NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
- // This has to return a "new" object because it is freed inside
- // testing::AddGlobalTestEnvironment when the gtest is being torn down
- static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
- return instance;
-}
-
-void NeuralnetworksHidlEnvironment::registerTestServices() {
- registerTestService<IDevice>();
-}
-
-// The main test class for NEURALNETWORK HIDL HAL.
void NeuralnetworksHidlTest::SetUp() {
- testing::VtsHalHidlTargetTestBase::SetUp();
-
-#ifdef PRESUBMIT_NOT_VTS
- const std::string name =
- NeuralnetworksHidlEnvironment::getInstance()->getServiceName<IDevice>();
- const std::string sampleDriver = "sample-";
- if (kDevice == nullptr && name.substr(0, sampleDriver.size()) == sampleDriver) {
- GTEST_SKIP();
- }
-#endif // PRESUBMIT_NOT_VTS
-
- ASSERT_NE(nullptr, kDevice.get());
+ testing::TestWithParam<NeuralnetworksHidlTestParam>::SetUp();
+ ASSERT_NE(kDevice, nullptr);
}
+static NamedDevice makeNamedDevice(const std::string& name) {
+ return {name, IDevice::getService(name)};
+}
+
+static std::vector<NamedDevice> getNamedDevicesImpl() {
+    // Retrieves the names of all service instances that implement IDevice,
+    // including any lazy HAL instances.
+ const std::vector<std::string> names = hardware::getAllHalInstanceNames(IDevice::descriptor);
+
+ // Get a handle to each device and pair it with its name.
+ std::vector<NamedDevice> namedDevices;
+ namedDevices.reserve(names.size());
+ std::transform(names.begin(), names.end(), std::back_inserter(namedDevices), makeNamedDevice);
+ return namedDevices;
+}
+
+const std::vector<NamedDevice>& getNamedDevices() {
+ const static std::vector<NamedDevice> devices = getNamedDevicesImpl();
+ return devices;
+}
+
+std::string printNeuralnetworksHidlTest(
+ const testing::TestParamInfo<NeuralnetworksHidlTestParam>& info) {
+ return gtestCompliantName(getName(info.param));
+}
+
+INSTANTIATE_DEVICE_TEST(NeuralnetworksHidlTest);
+
// Forward declaration from ValidateModel.cpp
void validateModel(const sp<IDevice>& device, const Model& model);
// Forward declaration from ValidateRequest.cpp
@@ -130,14 +137,3 @@
INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
} // namespace android::hardware::neuralnetworks::V1_0::vts::functional
-
-using android::hardware::neuralnetworks::V1_0::vts::functional::NeuralnetworksHidlEnvironment;
-
-int main(int argc, char** argv) {
- testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
- testing::InitGoogleTest(&argc, argv);
- NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
-
- int status = RUN_ALL_TESTS();
- return status;
-}
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
index 48dc237..17f4613 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.h
@@ -17,40 +17,34 @@
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_0_VTS_HAL_NEURALNETWORKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_0_VTS_HAL_NEURALNETWORKS_H
+#include "1.0/Utils.h"
+
#include <android/hardware/neuralnetworks/1.0/IDevice.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
-
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-
-#include <android-base/macros.h>
#include <gtest/gtest.h>
+#include <vector>
+
namespace android::hardware::neuralnetworks::V1_0::vts::functional {
-// A class for test environment setup
-class NeuralnetworksHidlEnvironment : public testing::VtsHalHidlTargetTestEnvBase {
- DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
- NeuralnetworksHidlEnvironment() = default;
+using NamedDevice = Named<sp<IDevice>>;
+using NeuralnetworksHidlTestParam = NamedDevice;
- public:
- static NeuralnetworksHidlEnvironment* getInstance();
- void registerTestServices() override;
-};
-
-// The main test class for NEURALNETWORKS HIDL HAL.
-class NeuralnetworksHidlTest : public testing::VtsHalHidlTargetTestBase {
- DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
-
- public:
- NeuralnetworksHidlTest() = default;
- void SetUp() override;
-
+class NeuralnetworksHidlTest : public testing::TestWithParam<NeuralnetworksHidlTestParam> {
protected:
- const sp<IDevice> kDevice = testing::VtsHalHidlTargetTestBase::getService<IDevice>(
- NeuralnetworksHidlEnvironment::getInstance());
+ void SetUp() override;
+ const sp<IDevice> kDevice = getData(GetParam());
};
+const std::vector<NamedDevice>& getNamedDevices();
+
+std::string printNeuralnetworksHidlTest(
+ const testing::TestParamInfo<NeuralnetworksHidlTestParam>& info);
+
+#define INSTANTIATE_DEVICE_TEST(TestSuite) \
+ INSTANTIATE_TEST_SUITE_P(PerInstance, TestSuite, testing::ValuesIn(getNamedDevices()), \
+ printNeuralnetworksHidlTest)
+
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const sp<IDevice>& device, const Model& model,
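The INSTANTIATE_DEVICE_TEST macro above follows the standard value-parameterized gtest pattern. A minimal self-contained sketch of that pattern, with a plain std::pair standing in for Named<sp<IDevice>> (all names below are placeholders):

    #include <gtest/gtest.h>
    #include <string>
    #include <utility>
    #include <vector>

    // Stand-in for Named<sp<IDevice>>; the int plays the role of the device handle.
    using NamedThing = std::pair<std::string, int>;

    class PerInstanceTest : public testing::TestWithParam<NamedThing> {
      protected:
        int device() const { return GetParam().second; }
    };

    // Runs once per entry returned by getNamedThings().
    TEST_P(PerInstanceTest, IsUsable) { EXPECT_GE(device(), 0); }

    static std::vector<NamedThing> getNamedThings() {
        return {{"mock0", 0}, {"mock1", 1}};
    }

    INSTANTIATE_TEST_SUITE_P(PerInstance, PerInstanceTest, testing::ValuesIn(getNamedThings()),
                             [](const testing::TestParamInfo<NamedThing>& info) {
                                 return info.param.first;  // must already be gtest-compliant
                             });

    int main(int argc, char** argv) {
        testing::InitGoogleTest(&argc, argv);
        return RUN_ALL_TESTS();
    }

With this layout a single test binary is instantiated once per registered service instance, which is what allows the hand-written main() and the global test environment to be deleted in the .cpp change above.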
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
index 1ce751c..6d4534c 100644
--- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -21,13 +21,15 @@
#include <android/hardware/neuralnetworks/1.0/types.h>
#include <algorithm>
#include <iosfwd>
+#include <string>
+#include <utility>
#include <vector>
#include "TestHarness.h"
namespace android::hardware::neuralnetworks {
// Create HIDL Request from the TestModel struct.
-V1_0::Request createRequest(const ::test_helper::TestModel& testModel);
+V1_0::Request createRequest(const test_helper::TestModel& testModel);
// After execution, copy out output results from the output memory pool.
std::vector<::test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request);
@@ -51,6 +53,21 @@
return index;
}
+template <typename Type>
+using Named = std::pair<std::string, Type>;
+
+template <typename Type>
+const std::string& getName(const Named<Type>& namedData) {
+ return namedData.first;
+}
+
+template <typename Type>
+const Type& getData(const Named<Type>& namedData) {
+ return namedData.second;
+}
+
+std::string gtestCompliantName(std::string name);
+
} // namespace android::hardware::neuralnetworks
namespace android::hardware::neuralnetworks::V1_0 {
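A small usage sketch for the Named<>/getName()/getData() helpers added above (standalone copy for illustration; the sp<IDevice> payload is replaced by an int, and the instance name is made up):

    #include <iostream>
    #include <string>
    #include <utility>

    template <typename Type>
    using Named = std::pair<std::string, Type>;

    template <typename Type>
    const std::string& getName(const Named<Type>& namedData) { return namedData.first; }

    template <typename Type>
    const Type& getData(const Named<Type>& namedData) { return namedData.second; }

    int main() {
        const Named<int> device = {"sample-all", 42};                            // hypothetical instance
        std::cout << getName(device) << " -> " << getData(device) << std::endl;  // sample-all -> 42
        return 0;
    }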
diff --git a/neuralnetworks/1.1/types.hal b/neuralnetworks/1.1/types.hal
index 73705bb..3d78fb6 100644
--- a/neuralnetworks/1.1/types.hal
+++ b/neuralnetworks/1.1/types.hal
@@ -26,7 +26,6 @@
* The type of an operation in a model.
*/
enum OperationType : @1.0::OperationType {
-
/**
* BatchToSpace for N-dimensional tensors.
*
@@ -41,7 +40,8 @@
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
- * Supported tensor rank: 4
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+ * and Channels) data layout.
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be reshaped
@@ -51,8 +51,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
BATCH_TO_SPACE_ND = 29,
@@ -91,8 +91,6 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
*/
DIV = 30,
@@ -126,8 +124,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ the scale and zeroPoint must be the same as input0.
*/
MEAN = 31,
@@ -138,7 +136,8 @@
*
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM} (the pad value is undefined)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (the pad value is undefined)
*
* Supported tensor rank: up to 4
*
@@ -160,11 +159,8 @@
* of the padding:
* output0.dimension[i] =
* padding[i, 0] + input0.dimension[i] + padding[i, 1]
- *
- * NOTE: The pad value for {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM}
- * is undefined.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
PAD = 32,
@@ -182,8 +178,10 @@
* Supported tensor {@link OperandType}:
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (the pad value is undefined)
*
- * Supported tensor rank: 4
+ * Supported tensor rank: 4, with "NHWC" (i.e., Num_samples, Height, Width,
+ * and Channels) data layout.
*
* Inputs:
* * 0: An n-D tensor, specifying the input.
@@ -201,8 +199,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
SPACE_TO_BATCH_ND = 33,
@@ -232,8 +230,8 @@
* * 0: A tensor of the same {@link OperandType} as input0. Contains the
* same data as input, but has one or more dimensions of size 1
* removed.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
SQUEEZE = 34,
@@ -278,8 +276,8 @@
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0 and rank (n - k),
* where k is the number of bits set in shrink_axis_mask.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
STRIDED_SLICE = 35,
@@ -318,8 +316,6 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
*/
SUB = 36,
@@ -345,11 +341,10 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
TRANSPOSE = 37,
-
};
/**
diff --git a/neuralnetworks/1.1/types.t b/neuralnetworks/1.1/types.t
new file mode 100644
index 0000000..75ac2e7
--- /dev/null
+++ b/neuralnetworks/1.1/types.t
@@ -0,0 +1,158 @@
+%% template file for generating types.hal.
+%% see frameworks/ml/nn/tools/api/README.md.
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.1;
+
+import @1.0::Operand;
+import @1.0::OperationType;
+import @1.0::PerformanceInfo;
+
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : @1.0::OperationType {
+%insert Operation_1.1
+};
+
+/**
+ * The capabilities of a driver.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data.
+ */
+ PerformanceInfo float32Performance;
+
+ /**
+ * Driver performance when operating on asymmetric 8-bit quantized data.
+ */
+ PerformanceInfo quantized8Performance;
+
+ /**
+ * Driver performance when operating on float32 data but performing
+ * calculations with range and/or precision as low as that of the IEEE
+ * 754 16-bit floating-point format.
+ */
+ PerformanceInfo relaxedFloat32toFloat16Performance;
+};
+
+/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ */
+ OperationType type;
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> outputs;
+};
+
+/**
+ * A Neural Network Model.
+ *
+ * This includes not only the execution graph, but also constant data such as
+ * weights or scalars added at construction time. The only information that
+ * may not be known is the shape of the input tensors.
+ */
+struct Model {
+ /**
+ * All operands included in the model.
+ */
+ vec<Operand> operands;
+
+ /**
+ * All operations included in the model.
+ *
+ * The operations are sorted into execution order. Every operand
+ * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
+ * written before it is read.
+ */
+ vec<Operation> operations;
+
+ /**
+ * Input indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> inputIndexes;
+
+ /**
+ * Output indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> outputIndexes;
+
+ /**
+ * A byte buffer containing operand data that were copied into the model.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_COPY.
+ */
+ vec<uint8_t> operandValues;
+
+ /**
+ * A collection of shared memory pools containing operand values.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_REFERENCE.
+ */
+ vec<memory> pools;
+
+ /**
+ * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or
+ * precision as low as that of the IEEE 754 16-bit floating-point format.
+ * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the
+ * range and precision of the IEEE 754 32-bit floating-point format.
+ */
+ bool relaxComputationFloat32toFloat16;
+};
+
+/**
+ * Execution preferences.
+ */
+enum ExecutionPreference : int32_t {
+ /**
+ * Prefer executing in a way that minimizes battery drain.
+ * This is desirable for compilations that will be executed often.
+ */
+ LOW_POWER = 0,
+ /**
+ * Prefer returning a single answer as fast as possible, even if this causes
+ * more power consumption.
+ */
+ FAST_SINGLE_ANSWER = 1,
+ /**
+ * Prefer maximizing the throughput of successive frames, for example when
+ * processing successive frames coming from the camera.
+ */
+ SUSTAINED_SPEED = 2,
+};
diff --git a/neuralnetworks/1.1/vts/functional/Android.bp b/neuralnetworks/1.1/vts/functional/Android.bp
index c197e6d..69e1761 100644
--- a/neuralnetworks/1.1/vts/functional/Android.bp
+++ b/neuralnetworks/1.1/vts/functional/Android.bp
@@ -14,10 +14,11 @@
// limitations under the License.
//
-cc_defaults {
- name: "VtsHalNeuralNetworksV1_1TargetTestDefaults",
+cc_test {
+ name: "VtsHalNeuralnetworksV1_1TargetTest",
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
+ "BasicTests.cpp",
"TestAssertions.cpp",
"ValidateModel.cpp",
"ValidateRequest.cpp",
@@ -39,35 +40,12 @@
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
],
+ whole_static_libs: [
+ "neuralnetworks_generated_V1_0_example",
+ "neuralnetworks_generated_V1_1_example",
+ ],
header_libs: [
"libneuralnetworks_headers",
],
- test_suites: ["general-tests"],
-}
-
-cc_test {
- name: "VtsHalNeuralnetworksV1_1TargetTest",
- defaults: ["VtsHalNeuralNetworksV1_1TargetTestDefaults"],
- srcs: [
- "BasicTests.cpp",
- ],
- whole_static_libs: [
- "neuralnetworks_generated_V1_0_example",
- "neuralnetworks_generated_V1_1_example",
- ],
-}
-
-cc_test {
- name: "PresubmitHalNeuralnetworksV1_1TargetTest",
- defaults: ["VtsHalNeuralNetworksV1_1TargetTestDefaults"],
- srcs: [
- "BasicTests.cpp",
- ],
- whole_static_libs: [
- "neuralnetworks_generated_V1_0_example",
- "neuralnetworks_generated_V1_1_example",
- ],
- cflags: [
- "-DPRESUBMIT_NOT_VTS",
- ],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/neuralnetworks/1.1/vts/functional/BasicTests.cpp b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
index 2791e80..44836f0 100644
--- a/neuralnetworks/1.1/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.1/vts/functional/BasicTests.cpp
@@ -24,17 +24,17 @@
using V1_0::ErrorStatus;
// create device test
-TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
+TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
// status test
-TEST_F(NeuralnetworksHidlTest, StatusTest) {
+TEST_P(NeuralnetworksHidlTest, StatusTest) {
Return<DeviceStatus> status = kDevice->getStatus();
ASSERT_TRUE(status.isOk());
EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}
// initialization
-TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
Return<void> ret =
kDevice->getCapabilities_1_1([](ErrorStatus status, const Capabilities& capabilities) {
EXPECT_EQ(ErrorStatus::NONE, status);
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
index fddfc2b..7a929d6 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
@@ -156,6 +156,20 @@
checkResults(testModel, outputs);
}
+void GeneratedTestBase::SetUp() {
+ testing::TestWithParam<GeneratedTestParam>::SetUp();
+ ASSERT_NE(kDevice, nullptr);
+}
+
+std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
+ return TestModelManager::get().getTestModels(filter);
+}
+
+std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
+ const auto& [namedDevice, namedModel] = info.param;
+ return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
+}
+
// Tag for the generated tests
class GeneratedTest : public GeneratedTestBase {};
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
index 273d1ec..cf449ea 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
@@ -18,29 +18,38 @@
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_1_GENERATED_TEST_HARNESS_H
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include "1.0/Utils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
-class GeneratedTestBase
- : public NeuralnetworksHidlTest,
- public testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
+using NamedModel = Named<const test_helper::TestModel*>;
+using GeneratedTestParam = std::tuple<NamedDevice, NamedModel>;
+
+class GeneratedTestBase : public testing::TestWithParam<GeneratedTestParam> {
protected:
- const test_helper::TestModel& kTestModel = *GetParam().second;
+ void SetUp() override;
+ const sp<IDevice> kDevice = getData(std::get<NamedDevice>(GetParam()));
+ const test_helper::TestModel& kTestModel = *getData(std::get<NamedModel>(GetParam()));
};
-#define INSTANTIATE_GENERATED_TEST(TestSuite, filter) \
- INSTANTIATE_TEST_SUITE_P( \
- TestGenerated, TestSuite, \
- testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
- [](const auto& info) { return info.param.first; })
+using FilterFn = std::function<bool(const test_helper::TestModel&)>;
+std::vector<NamedModel> getNamedModels(const FilterFn& filter);
+
+std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info);
+
+#define INSTANTIATE_GENERATED_TEST(TestSuite, filter) \
+ INSTANTIATE_TEST_SUITE_P(TestGenerated, TestSuite, \
+ testing::Combine(testing::ValuesIn(getNamedDevices()), \
+ testing::ValuesIn(getNamedModels(filter))), \
+ printGeneratedTest)
// Tag for the validation tests, instantiated in VtsHalNeuralnetworks.cpp.
// TODO: Clean up the hierarchy for ValidationTest.
class ValidationTest : public GeneratedTestBase {};
-Model createModel(const ::test_helper::TestModel& testModel);
+Model createModel(const test_helper::TestModel& testModel);
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
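The INSTANTIATE_GENERATED_TEST macro above crosses the device axis with the model axis. A self-contained sketch of the same testing::Combine pattern, again with plain pairs standing in for NamedDevice/NamedModel and made-up names:

    #include <gtest/gtest.h>
    #include <string>
    #include <tuple>
    #include <utility>
    #include <vector>

    using NamedDev = std::pair<std::string, int>;  // stands in for NamedDevice
    using NamedMod = std::pair<std::string, int>;  // stands in for NamedModel
    using Param = std::tuple<NamedDev, NamedMod>;

    class CombinedTest : public testing::TestWithParam<Param> {};

    // Runs once for every (device, model) combination.
    TEST_P(CombinedTest, RunsOncePerDeviceModelPair) {
        const auto& [device, model] = GetParam();
        EXPECT_FALSE(device.first.empty());
        EXPECT_FALSE(model.first.empty());
    }

    static std::vector<NamedDev> devices() { return {{"driverA", 0}, {"driverB", 1}}; }
    static std::vector<NamedMod> models() { return {{"add_quant8", 0}, {"conv_float32", 1}}; }

    INSTANTIATE_TEST_SUITE_P(TestGenerated, CombinedTest,
                             testing::Combine(testing::ValuesIn(devices()),
                                              testing::ValuesIn(models())),
                             [](const testing::TestParamInfo<Param>& info) {
                                 const auto& [device, model] = info.param;
                                 return device.first + "_" + model.first;  // e.g. driverA_add_quant8
                             });

    int main(int argc, char** argv) {
        testing::InitGoogleTest(&argc, argv);
        return RUN_ALL_TESTS();
    }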
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index d53d43e..d56d40b 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -17,13 +17,15 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
+#include <android-base/logging.h>
+#include <hidl/ServiceManagement.h>
+#include <string>
+#include <utility>
#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
-#include <android-base/logging.h>
-
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
using V1_0::ErrorStatus;
@@ -79,34 +81,39 @@
ASSERT_NE(nullptr, preparedModel->get());
}
-// A class for test environment setup
-NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
- // This has to return a "new" object because it is freed inside
- // testing::AddGlobalTestEnvironment when the gtest is being torn down
- static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
- return instance;
-}
-
-void NeuralnetworksHidlEnvironment::registerTestServices() {
- registerTestService<IDevice>();
-}
-
-// The main test class for NEURALNETWORK HIDL HAL.
void NeuralnetworksHidlTest::SetUp() {
- testing::VtsHalHidlTargetTestBase::SetUp();
-
-#ifdef PRESUBMIT_NOT_VTS
- const std::string name =
- NeuralnetworksHidlEnvironment::getInstance()->getServiceName<IDevice>();
- const std::string sampleDriver = "sample-";
- if (kDevice == nullptr && name.substr(0, sampleDriver.size()) == sampleDriver) {
- GTEST_SKIP();
- }
-#endif // PRESUBMIT_NOT_VTS
-
- ASSERT_NE(nullptr, kDevice.get());
+ testing::TestWithParam<NeuralnetworksHidlTestParam>::SetUp();
+ ASSERT_NE(kDevice, nullptr);
}
+static NamedDevice makeNamedDevice(const std::string& name) {
+ return {name, IDevice::getService(name)};
+}
+
+static std::vector<NamedDevice> getNamedDevicesImpl() {
+ // Retrieves the names of all service instances that implement IDevice,
+ // including any Lazy HAL instances.
+ const std::vector<std::string> names = hardware::getAllHalInstanceNames(IDevice::descriptor);
+
+ // Get a handle to each device and pair it with its name.
+ std::vector<NamedDevice> namedDevices;
+ namedDevices.reserve(names.size());
+ std::transform(names.begin(), names.end(), std::back_inserter(namedDevices), makeNamedDevice);
+ return namedDevices;
+}
+
+const std::vector<NamedDevice>& getNamedDevices() {
+ const static std::vector<NamedDevice> devices = getNamedDevicesImpl();
+ return devices;
+}
+
+std::string printNeuralnetworksHidlTest(
+ const testing::TestParamInfo<NeuralnetworksHidlTestParam>& info) {
+ return gtestCompliantName(getName(info.param));
+}
+
+INSTANTIATE_DEVICE_TEST(NeuralnetworksHidlTest);
+
// Forward declaration from ValidateModel.cpp
void validateModel(const sp<IDevice>& device, const Model& model);
// Forward declaration from ValidateRequest.cpp
@@ -133,14 +140,3 @@
INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
} // namespace android::hardware::neuralnetworks::V1_1::vts::functional
-
-using android::hardware::neuralnetworks::V1_1::vts::functional::NeuralnetworksHidlEnvironment;
-
-int main(int argc, char** argv) {
- testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
- testing::InitGoogleTest(&argc, argv);
- NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
-
- int status = RUN_ALL_TESTS();
- return status;
-}
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
index 9d6194a..e879d84 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.h
@@ -17,41 +17,33 @@
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_1_VTS_HAL_NEURALNETWORKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_1_VTS_HAL_NEURALNETWORKS_H
-#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.1/IDevice.h>
#include <android/hardware/neuralnetworks/1.1/types.h>
-
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-
-#include <android-base/macros.h>
#include <gtest/gtest.h>
+#include <vector>
+#include "1.0/Utils.h"
namespace android::hardware::neuralnetworks::V1_1::vts::functional {
-// A class for test environment setup
-class NeuralnetworksHidlEnvironment : public testing::VtsHalHidlTargetTestEnvBase {
- DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
- NeuralnetworksHidlEnvironment() = default;
+using NamedDevice = Named<sp<IDevice>>;
+using NeuralnetworksHidlTestParam = NamedDevice;
- public:
- static NeuralnetworksHidlEnvironment* getInstance();
- void registerTestServices() override;
-};
-
-// The main test class for NEURALNETWORKS HIDL HAL.
-class NeuralnetworksHidlTest : public testing::VtsHalHidlTargetTestBase {
- DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
-
- public:
- NeuralnetworksHidlTest() = default;
- void SetUp() override;
-
+class NeuralnetworksHidlTest : public testing::TestWithParam<NeuralnetworksHidlTestParam> {
protected:
- const sp<IDevice> kDevice = testing::VtsHalHidlTargetTestBase::getService<IDevice>(
- NeuralnetworksHidlEnvironment::getInstance());
+ void SetUp() override;
+ const sp<IDevice> kDevice = getData(GetParam());
};
+const std::vector<NamedDevice>& getNamedDevices();
+
+std::string printNeuralnetworksHidlTest(
+ const testing::TestParamInfo<NeuralnetworksHidlTestParam>& info);
+
+#define INSTANTIATE_DEVICE_TEST(TestSuite) \
+ INSTANTIATE_TEST_SUITE_P(PerInstance, TestSuite, testing::ValuesIn(getNamedDevices()), \
+ printNeuralnetworksHidlTest)
+
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const sp<IDevice>& device, const Model& model,
diff --git a/neuralnetworks/1.2/IPreparedModel.hal b/neuralnetworks/1.2/IPreparedModel.hal
index f3508fe..1445f18 100644
--- a/neuralnetworks/1.2/IPreparedModel.hal
+++ b/neuralnetworks/1.2/IPreparedModel.hal
@@ -157,22 +157,44 @@
* unless the device itself is in a bad state.
*
* @param callback A callback object used to retrieve memory resources
- * corresponding to a unique identifiers ("slots").
- * @param requestChannel Used by the client to send a serialized Request to
- * the Burst for execution. The client must not change
- * the content of any data object referenced by the
- * Request (described by the {@link @1.0::DataLocation}
- * of an {@link OperandInformation}) until a result
- * has been received from resultChannel. Execution
- * must not change the content of any of the data
- * objects corresponding to Request inputs. requestChannel
+ * corresponding to unique identifiers ("slots").
+ * @param requestChannel FMQ used by the client to send a serialized Request
+ * to the Burst for execution. The client must not
+ * change the content of any data object referenced by
+ * the Request (described by the
+ * {@link @1.0::DataLocation} of an
+ * {@link OperandInformation}) until a result has been
+ * received from resultChannel. Execution must not
+ * change the content of any of the data objects
+ * corresponding to Request inputs. requestChannel
* must not be used to pass a second Request object
- * until a result has been received from resultChannel.
- * @param resultChannel Used by the service to return the results of an
- * execution to the client: the status of the execution
- * and OutputShape of all output tensors. resultChannel
- * must be used to return the results if a Request was
- * sent through the requestChannel.
+ * until a result has been received from
+ * resultChannel. The client must send the request
+ * messages to the consumer atomically by using
+ * MessageQueue::writeBlocking if the queue is
+ * blocking, or by using MessageQueue::write if the
+ * queue is non-blocking. When the service receives a
+ * packet, it must dequeue the entire packet from the
+ * requestChannel. The client must not send a request
+ * packet that exceeds the length of the FMQ.
+ * @param resultChannel FMQ used by the service to return the results of an
+ * execution to the client: the status of the
+ * execution, OutputShape of all output tensors, and
+ * timing information. resultChannel must be used to
+ * return the results if a Request was sent through the
+ * requestChannel. The service must send the result
+ * messages to the consumer atomically by using
+ * MessageQueue::writeBlocking if the queue is
+ * blocking, or by using MessageQueue::write if the
+ * queue is non-blocking. When the client receives a
+ * packet, it must dequeue the entire packet from the
+ * resultChannel. If the packet's length exceeds the
+ * size of the FMQ, the service must not send this
+ * result packet; instead, the service must send a
+ * packet consisting of the error code
+ * ErrorStatus::GENERAL_FAILURE, no information for the
+ * outputShapes, and an indication that timing
+ * information is unavailable.
* @return status Error status of configuring the execution burst, must be:
* - NONE if the burst is successfully configured
* - DEVICE_UNAVAILABLE if driver is offline or busy
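A hedged sketch of the FMQ sizing rule spelled out above, using libfmq's synchronized MessageQueue with a plain uint32_t element as a stand-in for the serialized request packet (illustrative only, not the NN runtime's actual burst implementation):

    #include <fmq/MessageQueue.h>
    #include <vector>

    using android::hardware::kSynchronizedReadWrite;
    using android::hardware::MessageQueue;

    using RequestChannel = MessageQueue<uint32_t, kSynchronizedReadWrite>;

    // Refuse to send a packet that could never fit into the queue; a real client
    // would fall back to an ordinary execute() call in that case.
    bool sendRequestPacket(RequestChannel* requestChannel, const std::vector<uint32_t>& packet) {
        if (packet.size() > requestChannel->getQuantumCount()) {
            return false;  // packet exceeds the length of the FMQ
        }
        // write() is all-or-nothing on a synchronized queue; a blocking client
        // would use writeBlocking() instead, as described above.
        return requestChannel->write(packet.data(), packet.size());
    }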
diff --git a/neuralnetworks/1.2/types.hal b/neuralnetworks/1.2/types.hal
index f368ce2..837ced5 100644
--- a/neuralnetworks/1.2/types.hal
+++ b/neuralnetworks/1.2/types.hal
@@ -43,8 +43,6 @@
*
* Values of this operand type are either true or false. A zero value
* represents false; any other value represents true.
- *
- * Available since API level 29.
*/
BOOL = 6,
/**
@@ -55,14 +53,10 @@
* realValue = integerValue * scale.
*
* scale is a 32 bit floating point with value greater than zero.
- *
- * Available since API level 29.
*/
TENSOR_QUANT16_SYMM = 7,
/**
* A tensor of IEEE 754 16 bit floating point values.
- *
- * Available since API level 29.
*/
TENSOR_FLOAT16 = 8,
/**
@@ -70,14 +64,10 @@
*
* Values of this operand type are either true or false. A zero value
* represents false; any other value represents true.
- *
- * Available since API level 29.
*/
TENSOR_BOOL8 = 9,
/**
* An IEEE 754 16 bit floating point scalar value.
- *
- * Available since API level 29.
*/
FLOAT16 = 10,
/**
@@ -90,14 +80,13 @@
* - scales: an array of positive 32 bit floating point values.
* The size of the scales array must be equal to dimensions[channelDim].
*
+ * {@link SymmPerChannelQuantParams} must hold the parameters for an Operand of this type.
* The channel dimension of this tensor must not be unknown (dimensions[channelDim] != 0).
*
* The formula is:
* realValue[..., C, ...] =
* integerValue[..., C, ...] * scales[C]
* where C is an index in the Channel dimension.
- *
- * Available since API level 29.
*/
TENSOR_QUANT8_SYMM_PER_CHANNEL = 11,
/**
@@ -110,8 +99,6 @@
*
* The formula is:
* real_value = (integer_value - zeroPoint) * scale.
- *
- * Available since API level 29.
*/
TENSOR_QUANT16_ASYMM = 12,
/**
@@ -122,20 +109,19 @@
* realValue = integerValue * scale.
*
* scale is a 32 bit floating point with value greater than zero.
- *
- * Available since API level 29.
*/
TENSOR_QUANT8_SYMM = 13,
+
/*
- * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
- * OEM operation and data types.
+ * DEPRECATED. Since HAL version 1.2, extensions are the preferred
+ * alternative to OEM operation and data types.
*
* OEM specific scalar value.
* OEM = 10000,
*/
/*
- * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
- * OEM operation and data types.
+ * DEPRECATED. Since HAL version 1.2, extensions are the preferred
+ * alternative to OEM operation and data types.
*
* A tensor of OEM specific values.
* TENSOR_OEM_BYTE = 10001,
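As a numeric illustration of the asymmetric-quantization formula quoted in the operand documentation above, real_value = (integer_value - zeroPoint) * scale (toy values, not taken from any real model):

    #include <cstdint>
    #include <iostream>

    int main() {
        const float scale = 0.5f;       // hypothetical operand scale
        const int32_t zeroPoint = 128;  // hypothetical operand zeroPoint
        const uint8_t quantized = 140;  // one TENSOR_QUANT8_ASYMM element
        const float real = (static_cast<int32_t>(quantized) - zeroPoint) * scale;
        std::cout << real << std::endl;  // prints 6
        return 0;
    }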
@@ -166,6 +152,7 @@
* The type of an operation in a model.
*/
enum OperationType : int32_t {
+
/**
* Adds two tensors, element-wise.
*
@@ -187,12 +174,12 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
- * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
* dimension is only compatible with 0 or 1. The size of the output
* dimension is zero if either of corresponding input dimension is zero.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -202,14 +189,16 @@
* * 0: A tensor.
* * 1: A tensor of the same {@link OperandType}, and compatible dimensions
* as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ the scale and zeroPoint can be different from input0 scale and zeroPoint.
* * 2: An {@link OperandType::INT32} scalar, and has to be one of the
* {@link FusedActivationFunc} values. Specifies the activation to
* invoke on the result.
*
* Outputs:
* * 0: The sum, a tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
ADD = @1.1::OperationType:ADD,
@@ -227,7 +216,7 @@
* ) / sum(1)
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -235,13 +224,14 @@
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
*
* Both explicit padding and implicit padding are supported.
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input. Since API level 29, zero batches is supported for this
- * tensor.
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -263,12 +253,12 @@
* invoke on the result.
* * 10: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input. Since API level 29, zero batches is supported for this
- * tensor.
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -285,13 +275,13 @@
* invoke on the result.
* * 7: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth].
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
AVERAGE_POOL_2D = @1.1::OperationType:AVERAGE_POOL_2D,
@@ -302,33 +292,34 @@
* dimensions except the dimension along the concatenation axis.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM} (full support since API
- * level 29, see the input section)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the input section)
*
* Supported tensor rank: up to 4
*
* Inputs:
* * 0 ~ n-1: The list of n input tensors, of shape
* [D0, D1, ..., Daxis(i), ..., Dm].
- * Before API level 29, all input tensors of
+ * Before HAL version 1.2, all input tensors of
* {@link OperandType::TENSOR_QUANT8_ASYMM}
* must have the same scale and zeroPoint as the output tensor.
- * Since API level 29, zero-sized tensors are supported.
+ * Since HAL version 1.2, zero-sized tensors are supported.
* * n: An {@link OperandType::INT32} scalar, specifying the
* concatenation axis.
*
* Outputs:
* * 0: The output, a tensor of the same {@link OperandType} as the input
* tensors. The output shape is [D0, D1, ..., sum(Daxis(i)), ..., Dm].
- *
- * Available since API level 27.
+ * Since HAL version 1.2, for a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint values can be different from
+ * input tensors. Before HAL version 1.2 they have to be the same as for the input tensors.
*/
CONCATENATION = @1.1::OperationType:CONCATENATION,
/**
- * Performs an 2-D convolution operation.
+ * Performs a 2-D convolution operation.
*
* The CONV_2D op sweeps a 2-D filter that can mix channels together over a
* batch of images, applying the filter to each window of each image of the
@@ -354,7 +345,7 @@
* * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
* * * input.scale * filter.scale).
*
- * Available since API level 29:
+ * Available since HAL version 1.2:
* * 16 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
*
@@ -368,27 +359,29 @@
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
*
* Both explicit padding and implicit padding are supported.
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
- * specifying the input. Since API level 29, zero batches is supported
- * for this tensor.
+ * specifying the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
* * 1: A 4-D tensor, of shape
* [depth_out, filter_height, filter_width, depth_in], specifying the
- * filter. For tensor of type
- * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
- * dimension (extraParams.channelQuant.channelDim) must be set to 0.
+ * filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
- * tensor of type {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
* type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
- * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
- * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
- * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
- * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
* bias_scale[i] = input_scale * filter_scale[i].
* * 3: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
@@ -407,36 +400,37 @@
* invoke on the result.
* * 10: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
* * 11: An optional {@link OperandType::INT32} scalar, specifying the dilation
* factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
* cells between each filter element on width dimension. If this input is set,
* input 12 (dilation factor for height) must be specified as well.
- * Available since API level 29.
+ * Available since HAL version 1.2.
* * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
* factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
* cells between each filter element on height dimension. If this input is set,
* input 11 (dilation factor for width) must be specified as well.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
- * specifying the input. Since API level 29, zero batches is supported
- * for this tensor.
+ * specifying the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
* * 1: A 4-D tensor, of shape
* [depth_out, filter_height, filter_width, depth_in], specifying the
- * filter. For tensor of type
- * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
- * dimension (extraParams.channelQuant.channelDim) must be set to 0.
+ * filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
- * tensor of type {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
* type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
- * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
- * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
- * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
- * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
* bias_scale[i] = input_scale * filter_scale[i].
* * 3: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
@@ -450,26 +444,23 @@
* invoke on the result.
* * 7: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
* * 8: An optional {@link OperandType::INT32} scalar, specifying the dilation
* factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
* cells between each filter element on width dimension. If this input is set,
* input 9 (dilation factor for height) must be specified as well.
- * Available since API level 29.
+ * Available since HAL version 1.2.
* * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
* factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
* cells between each filter element on height dimension. If this input is set,
* input 8 (dilation factor for width) must be specified as well.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. Before API level 29,
- * for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the
- * following condition must be satisfied:
- * output_scale > input_scale * filter_scale
- *
- * Available since API level 27.
+ * [batches, out_height, out_width, depth_out].
+ * Before HAL version 1.2, for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied: output_scale > input_scale * filter_scale
*/
CONV_2D = @1.1::OperationType:CONV_2D,
@@ -504,7 +495,7 @@
* * * {@link OperandType::TENSOR_INT32} for bias (with scale set to
* * * input.scale * filter.scale).
*
- * Available since API level 29:
+ * Available since HAL version 1.2:
* * 16 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
*
@@ -518,6 +509,7 @@
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
*
* Both explicit padding and implicit padding are supported.
*
@@ -525,18 +517,19 @@
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
* specifying the input.
* * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
- * specifying the filter. For tensor of type
- * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
- * dimension (extraParams.channelQuant.channelDim) must be set to 3.
+ * specifying the filter.
+ * For tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 3.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
- * tensor of type {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
* type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
- * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
- * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
- * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
- * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
* bias_scale[i] = input_scale * filter_scale[i].
* * 3: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
@@ -557,17 +550,17 @@
* invoke on the result.
* * 11: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
* * 12: An optional {@link OperandType::INT32} scalar, specifying the dilation
* factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
* cells between each filter element on width dimension. If this input is set,
* input 13 (dilation factor for height) must be specified as well.
- * Available since API level 29.
+ * Available since HAL version 1.2.
* * 13: An optional {@link OperandType::INT32} scalar, specifying the dilation
* factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
* cells between each filter element on height dimension. If this input is set,
* input 12 (dilation factor for width) must be specified as well.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
@@ -575,14 +568,14 @@
* * 1: A 4-D tensor, of shape [1, filter_height, filter_width, depth_out],
* specifying the filter.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
- * tensor of type {@link OperandType::TENSOR_FLOAT32} or
- * {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
+ * tensor of type {@link OperandType::TENSOR_FLOAT32}
+ * or {@link OperandType::TENSOR_FLOAT16} the bias must be of the same
* type. For filter tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint
- * of 0 and bias_scale == input_scale * filter_scale. For filter tensor
- * of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}, the bias
- * should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of
- * 0 and bias_scale of 0. The actual scale of each value 'i' is equal to
+ * of 0 and bias_scale == input_scale * filter_scale.
+ * For filter tensor of {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL},
+ * the bias should be of {@link OperandType::TENSOR_INT32}, with zeroPoint of 0
+ * and bias_scale of 0. The actual scale of each value 'i' is equal to
* bias_scale[i] = input_scale * filter_scale[i].
* * 3: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
@@ -598,27 +591,24 @@
* invoke on the result.
* * 8: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
* * 9: An optional {@link OperandType::INT32} scalar, specifying the dilation
* factor for width. Defaults to 1. If set to k > 1, there will be k-1 skipped
* cells between each filter element on width dimension. If this input is set,
* input 10 (dilation factor for height) must be specified as well.
- * Available since API level 29.
+ * Available since HAL version 1.2.
* * 10: An optional {@link OperandType::INT32} scalar, specifying the dilation
* factor for height. Defaults to 1. If set to k > 1, there will be k-1 skipped
* cells between each filter element on height dimension. If this input is set,
* input 9 (dilation factor for width) must be specified as well.
- * Available since API level 29.
-
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: The output 4-D tensor, of shape
- * [batches, out_height, out_width, depth_out]. Before API level 29,
- * for output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the
- * following condition must be satisfied:
+ * [batches, out_height, out_width, depth_out]. Before HAL version 1.2, for
+ * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
+ * the following condition must be satisfied:
* output_scale > input_scale * filter_scale
- *
- * Available since API level 27.
*/
DEPTHWISE_CONV_2D = @1.1::OperationType:DEPTHWISE_CONV_2D,
@@ -638,7 +628,7 @@
* be divisible by block_size * block_size
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -646,6 +636,7 @@
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
*
* Inputs:
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
@@ -655,13 +646,13 @@
* of the input depth.
* * 2: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: The output 4-D tensor, of shape [batch, height*block_size,
* width*block_size, depth/(block_size*block_size)].
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
DEPTH_TO_SPACE = @1.1::OperationType:DEPTH_TO_SPACE,
@@ -674,22 +665,21 @@
*
* Supported input tensor {@link OperandType}:
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
- * * {@link OperandType::TENSOR_QUANT8_SYMM} (since API level 29)
- * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} (since API level 29)
+ * * {@link OperandType::TENSOR_QUANT8_SYMM} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} (since HAL version 1.2)
*
* Supported output tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}.
*
* Supported tensor rank: up to 4
*
* Inputs:
- * * 0: A tensor. Since API level 29, this tensor may be zero-sized.
+ * * 0: A tensor.
+ * Since HAL version 1.2, this tensor may be zero-sized.
*
* Outputs:
* * 0: A tensor with the same shape as input0.
- *
- * Available since API level 27.
*/
DEQUANTIZE = @1.1::OperationType:DEQUANTIZE,
@@ -730,8 +720,8 @@
* * 0: A n-D tensor with the same rank and shape as the Values
* tensor, except for the first dimension which has the same size
* as Lookups' only dimension.
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input1.
*/
EMBEDDING_LOOKUP = @1.1::OperationType:EMBEDDING_LOOKUP,
@@ -739,7 +729,7 @@
* Computes element-wise floor() on the input tensor.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
*
* Supported tensor rank: up to 4
@@ -750,8 +740,6 @@
* Outputs:
* * 0: The output tensor, of the same {@link OperandType} and dimensions as
* the input tensor.
- *
- * Available since API level 27.
*/
FLOOR = @1.1::OperationType:FLOOR,
@@ -764,7 +752,7 @@
* outputs = activation(inputs * weights’ + bias)
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -777,8 +765,8 @@
* [batch_size, input_size], where "input_size" corresponds to the
* number of inputs to the layer, matching the second dimension of
* weights, and "batch_size" is calculated by dividing the number of
- * elements by "input_size". Since API level 29, zero batch_size is
- * supported for this tensor.
+ * elements by "input_size".
+ * Since HAL version 1.2, zero batch_size is supported for this tensor.
* * 1: A 2-D tensor, specifying the weights, of shape
* [num_units, input_size], where "num_units" corresponds to the number
* of output nodes.
@@ -793,12 +781,9 @@
* invoke on the result.
*
* Outputs:
- * * 0: The output tensor, of shape [batch_size, num_units]. Before API
- * level 29, For output tensor of {@link
- * OperandType::TENSOR_QUANT8_ASYMM}, the following condition must be
- * satisfied: output_scale > input_scale * filter_scale.
- *
- * Available since API level 27.
+ * * 0: The output tensor, of shape [batch_size, num_units]. Before HAL version 1.2, for
+ * output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM}, the following
+ * condition must be satisfied: output_scale > input_scale * filter_scale.
*/
FULLY_CONNECTED = @1.1::OperationType:FULLY_CONNECTED,
@@ -849,13 +834,13 @@
*
* Outputs:
* * 0: Output. A tensor with shape [ k …].
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input2.
* * 1: Hits. A boolean tensor with shape [ k ] indicates whether the lookup
* hits (True) or not (False).
* Stored as {@link OperandType::TENSOR_QUANT8_ASYMM} with offset 0
* and scale 1.0f.
* A non-zero byte represents True, a hit. A zero indicates otherwise.
- *
- * Available since API level 27.
*/
HASHTABLE_LOOKUP = @1.1::OperationType:HASHTABLE_LOOKUP,
@@ -872,12 +857,12 @@
* 1-D slice along dimension dim.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since API level 29)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
*
* Supported tensor rank: up to 4
- * Tensors with rank less than 4 are only supported since API level 29.
+ * Tensors with rank less than 4 are only supported since HAL version 1.2.
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be normalized.
@@ -885,14 +870,12 @@
* specifying the dimension normalization would be performed on.
* Negative index is used to specify axis from the end (e.g. -1 for
* the last axis). Must be in the range [-n, n).
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} and same shape as input0.
* For {@link OperandType::TENSOR_QUANT8_ASYMM},
* the scale must be 1.f / 128 and the zeroPoint must be 128.
- *
- * Available since API level 27.
*/
L2_NORMALIZATION = @1.1::OperationType:L2_NORMALIZATION,
@@ -909,20 +892,21 @@
* sum(1))
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
*
* Both explicit padding and implicit padding are supported.
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input. Since API level 29, zero batches is supported for this
- * tensor.
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -944,12 +928,12 @@
* invoke on the result.
* * 10: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input. Since API level 29, zero batches is supported for this
- * tensor.
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -966,13 +950,11 @@
* invoke on the result.
* * 7: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth].
- *
- * Available since API level 27.
*/
L2_POOL_2D = @1.1::OperationType:L2_POOL_2D,
@@ -994,11 +976,11 @@
* 1-D slice along specified dimension.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
*
* Supported tensor rank: up to 4
- * Tensors with rank less than 4 are only supported since API level 29.
+ * Tensors with rank less than 4 are only supported since HAL version 1.2.
*
* Inputs:
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
@@ -1011,10 +993,10 @@
* For input tensor of {@link OperandType::TENSOR_FLOAT32}, the bias
* value must be of {@link OperandType::FLOAT32}.
* * 3: A scalar, specifying the scale factor, alpha.
- * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the alpha
- * value must be of {@link OperandType::FLOAT16}.
- * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the alpha
- * value must be of {@link OperandType::FLOAT32}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT16}, the
+ * alpha value must be of {@link OperandType::FLOAT16}.
+ * For input tensor of {@link OperandType::TENSOR_FLOAT32}, the
+ * alpha value must be of {@link OperandType::FLOAT32}.
* * 4: A scalar, specifying the exponent, beta.
* For input tensor of {@link OperandType::TENSOR_FLOAT16}, the beta
* value must be of {@link OperandType::FLOAT16}.
@@ -1024,12 +1006,10 @@
* specifying the dimension normalization would be performed on.
* Negative index is used to specify axis from the end (e.g. -1 for
* the last axis). Must be in the range [-n, n).
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 27.
*/
LOCAL_RESPONSE_NORMALIZATION = @1.1::OperationType:LOCAL_RESPONSE_NORMALIZATION,
@@ -1041,22 +1021,20 @@
* output = 1 / (1 + exp(-input))
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input. Since API level 29, this tensor may
- * be zero-sized.
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
* For {@link OperandType::TENSOR_QUANT8_ASYMM},
* the scale must be 1.f / 256 and the zeroPoint must be 0.
- *
- * Available since API level 27.
*/
LOGISTIC = @1.1::OperationType:LOGISTIC,
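A non-normative plain-C++ sketch of the quantized LOGISTIC contract above, assuming the usual affine dequantization real = (q - zeroPoint) * scale and the fixed output parameters scale = 1/256, zeroPoint = 0:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Reference element-wise LOGISTIC for one quantized value: dequantize with
    // the input's (scale, zeroPoint), apply the sigmoid, then requantize with
    // the required output parameters scale = 1/256, zeroPoint = 0.
    uint8_t logisticQuant8(uint8_t in, float inScale, int32_t inZeroPoint) {
        const float x = (static_cast<int32_t>(in) - inZeroPoint) * inScale;
        const float y = 1.f / (1.f + std::exp(-x));                      // in [0, 1]
        const int32_t q = static_cast<int32_t>(std::round(y * 256.f));   // scale 1/256, zeroPoint 0
        return static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }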
@@ -1064,7 +1042,7 @@
     * Projects an input to a bit vector via locality sensitive hashing.
*
* Supported input tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_INT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
@@ -1086,7 +1064,7 @@
* Tensor[1].Dim[0] == Tensor[2].Dim[0]
* * 3: Type:
* Sparse:
- * Value LSHProjectionType_SPARSE(=3) (since API level 29).
+ * Value LSHProjectionType_SPARSE(=3) (since HAL version 1.2).
* Computed bit vector is considered to be sparse.
* Each output element is an int32 made up of multiple bits
* computed from hash functions.
@@ -1107,14 +1085,12 @@
* Outputs:
* * 0: If the projection type is Sparse:
* Output.Dim == { Tensor[0].Dim[0] }
- * A tensor of int32 that represents hash signatures,
+ * A tensor of int32 that represents hash signatures.
*
* If the projection type is Dense:
* Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] }
* A flattened tensor that represents projected bit vectors.
- *
- * Available since API level 27.
- * The offset value for sparse projections was added in API level 29.
+ * The offset value for sparse projections was added in HAL version 1.2.
*/
LSH_PROJECTION = @1.1::OperationType:LSH_PROJECTION,
@@ -1170,7 +1146,7 @@
* matrix, each element of which is the product of the corresponding
* elements of the input matrices.
*
- * Since API level 29 LSTM supports layer normalization.
+ * Since HAL version 1.2 LSTM supports layer normalization.
* In case layer normalization is used, the inputs to internal activation
* functions (sigmoid and \f$g\f$) are normalized, rescaled and recentered
* following an approach from section 3.1 from
@@ -1197,7 +1173,7 @@
* * The projection bias (\f$b_{proj}\f$) may (but not required to) have a
* value if the recurrent projection layer exists, and should otherwise
* have no value.
- * * (API level >= 29) The four layer normalization weights either all have
+ * * (HAL version 1.2 or later) The four layer normalization weights either all have
* values or none of them have values. Additionally, if CIFG is used,
* input layer normalization weights tensor is omitted and the other layer
* normalization weights either all have values or none of them have
@@ -1228,7 +1204,7 @@
* Jimmy Ba et al. "Layer Normalization"
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
*
* All input and output tensors must be of the same type.
@@ -1291,24 +1267,24 @@
* * 21:The clipping threshold (\f$t_{cell}\f$) for the cell state, such
* that values are bound within [-cell_clip, cell_clip]. If set to 0.0
* then clipping is disabled.
- * Until API level 29 this scalar must be of type {@link
- * FLOAT32}. Since API level 29, if all the input
+ * Until HAL version 1.2 this scalar must be of type {@link
+ * OperandType::FLOAT32}. Since HAL version 1.2, if all the input
* tensors have type {@link OperandType::TENSOR_FLOAT32}, this
* scalar must be of the type {@link OperandType::FLOAT32},
* otherwise if all the input tensors have the type {@link
- * TENSOR_FLOAT16}, this scalar must be of type {@link
- * FLOAT16}.
+ * OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
+ * OperandType::FLOAT16}.
* * 22:The clipping threshold (\f$t_{proj}\f$) for the output from the
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
- * Until API level 29 this scalar must be of type {@link
- * FLOAT32}. Since API level 29, if all the input
+ * Until HAL version 1.2 this scalar must be of type {@link
+ * OperandType::FLOAT32}. Since HAL version 1.2, if all the input
* tensors have type {@link OperandType::TENSOR_FLOAT32}, this
* scalar must be of the type {@link OperandType::FLOAT32},
* otherwise if all the input tensors have the type {@link
- * TENSOR_FLOAT16}, this scalar must be of type {@link
- * FLOAT16}.
- * Since API level 29 there are additional inputs to this op:
+ * OperandType::TENSOR_FLOAT16}, this scalar must be of type {@link
+ * OperandType::FLOAT16}.
+ * Since HAL version 1.2 there are additional inputs to this op:
* * 23:The input layer normalization weights.
* A 1-D tensor of shape [num_units]. Used to rescale normalized inputs
* to activation at input gate.
@@ -1333,8 +1309,6 @@
* * 3: The output (\f$o_t\f$).
* A 2-D tensor of shape [batch_size, output_size]. This is effectively
* the same as the current “output state (out)” value.
- *
- * Available since API level 27.
*/
LSTM = @1.1::OperationType:LSTM,
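A minimal plain-C++ sketch of the "all or none" layer-normalization rule described above (hypothetical helper; boolean presence flags stand in for the optional weight operands):

    // Checks the HAL 1.2 LSTM layer-normalization constraint: the four layer
    // normalization weight tensors must either all be provided or all be
    // omitted; with CIFG, the input weights are omitted and the remaining
    // three act together.
    bool layerNormWeightsConsistent(bool inputLN, bool forgetLN, bool cellLN,
                                    bool outputLN, bool useCifg) {
        if (useCifg) {
            return !inputLN && (forgetLN == cellLN) && (cellLN == outputLN);
        }
        return (inputLN == forgetLN) && (forgetLN == cellLN) && (cellLN == outputLN);
    }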
@@ -1352,7 +1326,7 @@
* )
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -1360,13 +1334,14 @@
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
*
* Both explicit padding and implicit padding are supported.
*
* Inputs (explicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input. Since API level 29, zero batches is supported for this
- * tensor.
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the padding on
* the left, in the ‘width’ dimension.
* * 2: An {@link OperandType::INT32} scalar, specifying the padding on
@@ -1388,12 +1363,12 @@
* invoke on the result.
* * 10: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Inputs (implicit padding):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input. Since API level 29, zero batches is supported for this
- * tensor.
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the implicit
* padding scheme, has to be one of the
* following values: {0 (NONE), 1 (SAME), 2 (VALID)}.
@@ -1410,13 +1385,13 @@
* invoke on the result.
* * 7: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth].
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
MAX_POOL_2D = @1.1::OperationType:MAX_POOL_2D,
@@ -1435,15 +1410,15 @@
* of the input operands. It starts with the trailing dimensions, and works
* its way forward.
*
- * Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
- * * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM}
- *
- * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
* dimension is only compatible with 0 or 1. The size of the output
     * dimension is zero if either of the corresponding input dimensions is zero.
*
+ * Supported tensor {@link OperandType}:
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
+ * * {@link OperandType::TENSOR_FLOAT32}
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ *
* Supported tensor rank: up to 4
*
* Inputs:
@@ -1459,8 +1434,6 @@
* For output tensor of {@link OperandType::TENSOR_QUANT8_ASYMM},
* the following condition must be satisfied:
* output_scale > input1_scale * input2_scale.
- *
- * Available since API level 27.
*/
MUL = @1.1::OperationType:MUL,
@@ -1472,20 +1445,20 @@
* output = max(0, input)
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input. Since API level 29, this tensor may
- * be zero-sized.
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
RELU = @1.1::OperationType:RELU,
@@ -1497,20 +1470,20 @@
* output = min(1.f, max(-1.f, input))
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input. Since API level 29, this tensor may
- * be zero-sized.
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
*
* Outputs:
- * * 0: The output tensor of same shape as input0.
- *
- * Available since API level 27.
+ * * 0: The output tensor of the same shape as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
RELU1 = @1.1::OperationType:RELU1,
@@ -1522,20 +1495,20 @@
* output = min(6, max(0, input))
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input. Since API level 29, this tensor may
- * be zero-sized.
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
RELU6 = @1.1::OperationType:RELU6,
@@ -1546,7 +1519,7 @@
* tensor, but with a newly specified shape.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -1565,8 +1538,8 @@
*
* Outputs:
* * 0: The output tensor, of shape specified by the input shape.
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
RESHAPE = @1.1::OperationType:RESHAPE,
@@ -1578,30 +1551,31 @@
* same as corner pixels of input.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since API level 29)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
*
* Both resizing by shape and resizing by scale are supported.
*
* Inputs (resizing by shape):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
- * the input. Since API level 29, zero batches is supported for this
- * tensor.
+ * the input.
+ * Since HAL version 1.2, zero batches is supported for this tensor.
* * 1: An {@link OperandType::INT32} scalar, specifying the output
* width of the output tensor.
* * 2: An {@link OperandType::INT32} scalar, specifying the output
* height of the output tensor.
* * 3: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
- * Inputs (resizing by scale, since API level 29):
+ * Inputs (resizing by scale, since HAL version 1.2):
* * 0: A 4-D tensor, of shape [batches, height, width, depth], specifying
* the input. Zero batches is supported for this tensor.
* * 1: A scalar, specifying width_scale, the scaling factor of the width
@@ -1622,8 +1596,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
RESIZE_BILINEAR = @1.1::OperationType:RESIZE_BILINEAR,
@@ -1644,7 +1618,7 @@
* argument (if not “NONE”).
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
*
* The input tensors must all be the same type.
@@ -1676,8 +1650,6 @@
* * 1: output.
* A 2-D tensor of shape [batch_size, num_units]. This is effectively
* the same as the current state value.
- *
- * Available since API level 27.
*/
RNN = @1.1::OperationType:RNN,
@@ -1696,34 +1668,32 @@
* independently on each 1-D slice along specified dimension.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
* Supported tensor rank: up to 4.
- * Tensors with rank other than 2 or 4 are only supported since API level 29.
+ * Tensors with rank other than 2 or 4 are only supported since HAL version 1.2.
*
* Inputs:
- * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped. Since
- * API level 29, this tensor may be zero-sized.
+ * * 0: A 2-D or 4-D tensor, specifying the tensor to be reshaped.
+ * Since HAL version 1.2, this tensor may be zero-sized.
* * 1: A scalar, specifying the positive scaling factor for the exponent,
* beta. If input0 is of {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_QUANT8_ASYMM}, the scalar must be of
- * {@link OperandType::FLOAT32}. If input0 is of {@link
- * OperandType::TENSOR_FLOAT16}, then the scalar must be of {@link
- * OperandType::FLOAT16}.
+ * {@link OperandType::FLOAT32}.
+ * If input0 is of {@link OperandType::TENSOR_FLOAT16}, then the
+ * scalar must be of {@link OperandType::FLOAT16}.
* * 2: An optional {@link OperandType::INT32} scalar, default to -1,
* specifying the dimension the activation would be performed on.
* Negative index is used to specify axis from the end (e.g. -1 for
* the last axis). Must be in the range [-n, n).
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
* For {@link OperandType::TENSOR_QUANT8_ASYMM},
* the scale must be 1.f / 256 and the zeroPoint must be 0.
- *
- * Available since API level 27.
*/
SOFTMAX = @1.1::OperationType:SOFTMAX,
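A non-normative plain-C++ reference for one 1-D slice of SOFTMAX with the beta scale from input 1; the max subtraction is a common numerical-stability detail, not something mandated here.

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // output[i] = exp(beta * (x[i] - max(x))) / sum_j exp(beta * (x[j] - max(x)))
    std::vector<float> softmaxSlice(const std::vector<float>& x, float beta) {
        float maxVal = x.empty() ? 0.f : x[0];
        for (float v : x) maxVal = std::max(maxVal, v);
        std::vector<float> out(x.size());
        float sum = 0.f;
        for (size_t i = 0; i < x.size(); ++i) {
            out[i] = std::exp(beta * (x[i] - maxVal));
            sum += out[i];
        }
        for (float& v : out) v /= sum;
        return out;
    }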
@@ -1742,7 +1712,7 @@
* The input tensor's height and width must be divisible by block_size.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -1750,6 +1720,7 @@
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
*
* Inputs:
* * 0: A 4-D tensor, of shape [batches, height, width, depth_in],
@@ -1759,13 +1730,13 @@
* input height and width.
* * 2: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: The output 4-D tensor, of shape [batches, height/block_size,
* width/block_size, depth_in*block_size*block_size].
- *
- * Available since API level 27.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
SPACE_TO_DEPTH = @1.1::OperationType:SPACE_TO_DEPTH,
@@ -1809,7 +1780,7 @@
* the filters.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
*
* All input tensors must be the same type.
@@ -1843,8 +1814,6 @@
* * 1: output.
* A 2-D tensor of the same {@link OperandType} as the inputs, with shape
* [batch_size, num_units].
- *
- * Available since API level 27.
*/
SVDF = @1.1::OperationType:SVDF,
@@ -1856,22 +1825,20 @@
* output = tanh(input)
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since API level 29)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
*
* Supported tensor rank: up to 4.
*
* Inputs:
- * * 0: A tensor, specifying the input. Since API level 29, this tensor may
- * be zero-sized.
+ * * 0: A tensor, specifying the input.
+ * Since HAL version 1.2, this tensor may be zero-sized.
*
* Outputs:
* * 0: The output tensor of same shape as input0.
* For {@link OperandType::TENSOR_QUANT8_ASYMM},
* the scale must be 1.f / 128 and the zeroPoint must be 128.
- *
- * Available since API level 27.
*/
TANH = @1.1::OperationType:TANH,
@@ -1886,7 +1853,7 @@
* This is the reverse of SpaceToBatch.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -1894,6 +1861,7 @@
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be reshaped
@@ -1906,8 +1874,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
BATCH_TO_SPACE_ND = @1.1::OperationType:BATCH_TO_SPACE_ND,
@@ -1931,12 +1899,12 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
- * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
* dimension is only compatible with 0 or 1. The size of the output
     * dimension is zero if either of the corresponding input dimensions is zero.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
*
* Supported tensor rank: up to 4
@@ -1951,8 +1919,6 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
*/
DIV = @1.1::OperationType:DIV,
@@ -1965,7 +1931,7 @@
* length 1.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -1987,21 +1953,21 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     * the scale and zeroPoint must be the same as input0.
*/
MEAN = @1.1::OperationType:MEAN,
/**
- * Pads a tensor with zeros.
+ * Pads a tensor.
*
* This operation pads a tensor according to the specified paddings.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM} (full support since API
- * level 29, see the output section)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the output section)
*
* Supported tensor rank: up to 4
*
@@ -2023,12 +1989,12 @@
* of the padding:
* output0.dimension[i] =
* padding[i, 0] + input0.dimension[i] + padding[i, 1]
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*
- * NOTE: Before API level 29, the pad value for
- * {@link ANEURALNETWORKS_TENSOR_QUANT8_ASYMM} is undefined.
- * Since API level 29, the pad value is always the logical zero.
- *
- * Available since API level 28.
+ * NOTE: Before HAL version 1.2, the pad value for
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+ * Since HAL version 1.2, the pad value is always the logical zero.
*/
PAD = @1.1::OperationType:PAD,
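A small plain-C++ sketch of the 1.2 quantized pad value described in the note above, assuming "logical zero" means the quantized representation of 0.0f (i.e. the tensor's zeroPoint under the affine scheme real = (q - zeroPoint) * scale):

    #include <cstdint>

    // Fill value used when padding a TENSOR_QUANT8_ASYMM tensor on a 1.2
    // driver: the value that dequantizes to 0.0f.
    uint8_t quant8PadValue(int32_t zeroPoint) {
        return static_cast<uint8_t>(zeroPoint);
    }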
@@ -2044,14 +2010,16 @@
* dimensions of the input are optionally zero padded according to paddings.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
+ * (full support since HAL version 1.2, see the output section)
*
* Supported tensor rank: 4, with "NHWC" or "NCHW" data layout.
* With the default data layout NHWC, the data is stored in the order of:
* [batch, height, width, channels]. Alternatively, the data layout could
* be NCHW, the data storage order of: [batch, channels, height, width].
+ * NCHW is supported since HAL version 1.2.
*
* Inputs:
* * 0: An n-D tensor, specifying the input.
@@ -2068,12 +2036,16 @@
* end of dimension i.
* * 3: An optional {@link OperandType::BOOL} scalar, default to false.
* Set to true to specify NCHW data layout for input0 and output0.
- * Available since API level 29.
+ * Available since HAL version 1.2.
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*
- * Available since API level 28.
+ * NOTE: Before HAL version 1.2, the pad value for
+ * {@link OperandType::TENSOR_QUANT8_ASYMM} is undefined.
+ * Since HAL version 1.2, the pad value is always the logical zero.
*/
SPACE_TO_BATCH_ND = @1.1::OperationType:SPACE_TO_BATCH_ND,
@@ -2086,7 +2058,7 @@
* dimensions by specifying the axes (input1).
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -2104,8 +2076,8 @@
* * 0: A tensor of the same {@link OperandType} as input0. Contains the
* same data as input, but has one or more dimensions of size 1
* removed.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
SQUEEZE = @1.1::OperationType:SQUEEZE,
@@ -2119,7 +2091,7 @@
* reverse slice.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -2151,8 +2123,8 @@
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0 and rank (n - k),
* where k is the number of bits set in shrink_axis_mask.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
STRIDED_SLICE = @1.1::OperationType:STRIDED_SLICE,
@@ -2176,14 +2148,14 @@
* input2.dimension = {5, 4, 3, 1}
* output.dimension = {5, 4, 3, 2}
*
- * Since API level 29, generic zero-sized input tensor is supported. Zero
+ * Since HAL version 1.2, generic zero-sized input tensor is supported. Zero
* dimension is only compatible with 0 or 1. The size of the output
     * dimension is zero if either of the corresponding input dimensions is zero.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
- * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since API level 29)
+ * * {@link OperandType::TENSOR_QUANT8_ASYMM} (since HAL version 1.2)
*
* Supported tensor rank: up to 4
*
@@ -2197,8 +2169,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
SUB = @1.1::OperationType:SUB,
@@ -2212,7 +2184,7 @@
* regular matrix transpose on 2-D input Tensors.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16} (since HAL version 1.2)
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -2220,14 +2192,14 @@
*
* Inputs:
* * 0: An n-D tensor, specifying the tensor to be transposed.
- * Since API level 29, this tensor may be zero-sized.
+ * Since HAL version 1.2, this tensor may be zero-sized.
* * 1: An optional 1-D Tensor of {@link OperandType::TENSOR_INT32},
* the permutation of the dimensions of the input tensor.
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 28.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
TRANSPOSE = @1.1::OperationType:TRANSPOSE,
@@ -2245,8 +2217,6 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 29.
*/
ABS = 38,
@@ -2269,8 +2239,6 @@
*
* Outputs:
* * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
- *
- * Available since API level 29.
*/
// There is no underscore in ARG_MAX to avoid name conflict with
// the macro defined in libc/kernel/uapi/linux/limits.h.
@@ -2295,8 +2263,6 @@
*
* Outputs:
* * 0: An (n - 1)-D {@link OperandType::TENSOR_INT32} tensor.
- *
- * Available since API level 29.
*/
ARGMIN = 40, // See ARGMAX for naming discussion.
@@ -2341,8 +2307,8 @@
* * 0: A tensor of the same {@link OperandType} as input0, with shape
* [num_rois, num_classes * 4], specifying the coordinates of each
* output bounding box for each class, with format [x1, y1, x2, y2].
- *
- * Available since API level 29.
+ * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
*/
AXIS_ALIGNED_BBOX_TRANSFORM = 41,
@@ -2482,17 +2448,15 @@
* then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
- * otherwise if all the input tensors have the type {@link
- * TENSOR_FLOAT16}, this scalar must be of type {@link
- * FLOAT16}.
+ * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
+ * this scalar must be of type {@link OperandType::FLOAT16}.
* * 50: The clipping threshold for the output from the
* projection layer, such that values are bound within
* [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
* If all the input tensors have type {@link OperandType::TENSOR_FLOAT32},
* this scalar must be of the type {@link OperandType::FLOAT32},
- * otherwise if all the input tensors have the type {@link
- * TENSOR_FLOAT16}, this scalar must be of type {@link
- * FLOAT16}.
+ * otherwise if all the input tensors have the type {@link OperandType::TENSOR_FLOAT16},
+ * this scalar must be of type {@link OperandType::FLOAT16}.
* * 51: merge_outputs
* An {@link OperandType::BOOL} scalar specifying if the outputs
* from forward and backward cells should be merged.
@@ -2539,8 +2503,6 @@
* A 3-D tensor of shape:
* If time-major: [max_time, batch_size, bw_output_size]
* If batch-major: [batch_size, max_time, bw_output_size]
- *
- * Available since API level 29.
*/
BIDIRECTIONAL_SEQUENCE_LSTM = 42,
@@ -2658,8 +2620,6 @@
* (timeMajor). If it is set to true, then the shape is set to
* [maxTime, batchSize, bwNumUnits], otherwise the shape is set to
* [batchSize, maxTime, bwNumUnits].
- *
- * Available since API level 29.
*/
BIDIRECTIONAL_SEQUENCE_RNN = 43,
@@ -2737,8 +2697,6 @@
* * 3: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_output_rois], specifying the batch index of each box. Boxes
* with the same batch index are grouped together.
- *
- * Available since API level 29.
*/
BOX_WITH_NMS_LIMIT = 44,
@@ -2762,8 +2720,6 @@
*
* Outputs:
* * 0: A tensor with the same shape as input0.
- *
- * Available since API level 29.
*/
CAST = 45,
@@ -2800,8 +2756,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} and same shape as input0.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
CHANNEL_SHUFFLE = 46,
@@ -2856,14 +2812,14 @@
* * 11: A scalar, score_threshold. Boxes with scores lower than the
* threshold are filtered before sending to the NMS algorithm. The
* scalar must be of {@link OperandType::FLOAT16} if input0 is of
- * {@link OperandType::TENSOR_FLOAT16} and of {@link
- * OperandType::FLOAT32} if input0 is of {@link
- * OperandType::TENSOR_FLOAT32}.
+ * {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
* * 12: A scalar, specifying the IoU threshold for hard NMS. The scalar
- * must be of {@link OperandType::FLOAT16} if input0 is of {@link
- * OperandType::TENSOR_FLOAT16} and of {@link
- * OperandType::FLOAT32} if input0 is of {@link
- * OperandType::TENSOR_FLOAT32}.
+ * must be of {@link OperandType::FLOAT16} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
* * 13: An {@link OperandType::BOOL} scalar, set to true to include
* background class in the list of label map for the output, set
* to false to not include the background. When the background
@@ -2882,8 +2838,6 @@
* output detection.
* * 3: An 1-D {@link OperandType::TENSOR_INT32} tensor, of shape [batches],
* specifying the number of valid output detections for each batch.
- *
- * Available since API level 29.
*/
DETECTION_POSTPROCESSING = 47,
@@ -2908,8 +2862,6 @@
*
* Outputs:
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
- *
- * Available since API level 29.
*/
EQUAL = 48,
@@ -2927,8 +2879,6 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 29.
*/
EXP = 49,
@@ -2956,8 +2906,8 @@
* Outputs:
* * 0: An (n + 1)-D tensor with the same {@link OperandType} and data as
* input0.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
EXPAND_DIMS = 50,
@@ -2994,8 +2944,8 @@
*
* Outputs:
* * 0: An (n + k - 1)-D tensor with the same {@link OperandType} as input0.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
GATHER = 51,
@@ -3074,8 +3024,6 @@
* * 2: A 1-D {@link OperandType::TENSOR_INT32} tensor, of shape
* [num_output_rois], specifying the batch index of each box. Boxes
* with the same batch index are grouped together.
- *
- * Available since API level 29.
*/
GENERATE_PROPOSALS = 52,
@@ -3100,8 +3048,6 @@
*
* Outputs:
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
- *
- * Available since API level 29.
*/
GREATER = 53,
/**
@@ -3125,8 +3071,6 @@
*
* Outputs:
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
- *
- * Available since API level 29.
*/
GREATER_EQUAL = 54,
@@ -3191,7 +3135,8 @@
* [depth_out, filter_height, filter_width, depth_group], specifying
* the filter, where depth_out must be divisible by num_groups. For
* tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
- * the channel dimension must be set to 0.
+ * the channel dimension (channelDim at
+ * {@link SymmPerChannelQuantParams}) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
@@ -3229,7 +3174,8 @@
* [depth_out, filter_height, filter_width, depth_group], specifying
* the filter, where depth_out must be divisible by num_groups. For
* tensor of type {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL}
- * the channel dimension must be set to 0.
+ * the channel dimension (SymmPerChannelQuantParams::channelDim)
+ * must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_FLOAT16}, the bias must be of the same
@@ -3258,8 +3204,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth_out].
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
GROUPED_CONV_2D = 55,
@@ -3300,12 +3246,14 @@
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0, with shape
* [num_boxes, num_keypoints], specifying score of the keypoints.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from input0 scale and zeroPoint.
* * 1: A tensor of the same {@link OperandType} as input1, with shape
* [num_boxes, num_keypoints, 2], specifying the location of
* the keypoints, the second dimension is organized as
* [keypoint_x, keypoint_y].
- *
- * Available since API level 29.
+ * For type of {@link OperandType::TENSOR_QUANT16_ASYMM}, the
+ * scale must be 0.125 and the zero point must be 0.
*/
HEATMAP_MAX_KEYPOINT = 56,
@@ -3339,26 +3287,24 @@
* * 0: An n-D tensor, specifying the tensor to be normalized.
* * 1: A scalar, specifying gamma, the scale applied to the normalized
* tensor. The scalar must be of {@link OperandType::FLOAT16} if
- * input0 is of {@link OperandType::TENSOR_FLOAT16} and of {@link
- * OperandType::FLOAT32} if input0 is of {@link
- * OperandType::TENSOR_FLOAT32}.
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
* * 2: A scalar, specifying beta, the offset applied to the normalized
* tensor. The scalar must be of {@link OperandType::FLOAT16} if
- * input0 is of {@link OperandType::TENSOR_FLOAT16} and of {@link
- * OperandType::FLOAT32} if input0 is of {@link
- * OperandType::TENSOR_FLOAT32}.
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
* * 3: A scalar, specifying epsilon, the small value added to variance to
* avoid dividing by zero. The scalar must be of {@link OperandType::FLOAT16} if
- * input0 is of {@link OperandType::TENSOR_FLOAT16} and of {@link
- * OperandType::FLOAT32} if input0 is of {@link
- * OperandType::TENSOR_FLOAT32}.
+ * input0 is of {@link OperandType::TENSOR_FLOAT16} and of
+ * {@link OperandType::FLOAT32} if input0 is of
+ * {@link OperandType::TENSOR_FLOAT32}.
* * 4: An {@link OperandType::BOOL} scalar, set to true to specify
* NCHW data layout for input0 and output0. Set to false for NHWC.
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} and same shape as input0.
- *
- * Available since API level 29.
*/
INSTANCE_NORMALIZATION = 57,
@@ -3383,8 +3329,6 @@
*
* Outputs:
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
- *
- * Available since API level 29.
*/
LESS = 58,
@@ -3409,8 +3353,6 @@
*
* Outputs:
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
- *
- * Available since API level 29.
*/
LESS_EQUAL = 59,
@@ -3428,8 +3370,6 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 29.
*/
LOG = 60,
@@ -3450,8 +3390,6 @@
*
* Outputs:
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
- *
- * Available since API level 29.
*/
LOGICAL_AND = 61,
@@ -3468,8 +3406,6 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 29.
*/
LOGICAL_NOT = 62,
@@ -3490,8 +3426,6 @@
*
* Outputs:
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
- *
- * Available since API level 29.
*/
LOGICAL_OR = 63,
@@ -3523,8 +3457,6 @@
* Outputs:
* * 0: The output tensor of the same {@link OperandType} and shape as
* input0.
- *
- * Available since API level 29.
*/
LOG_SOFTMAX = 64,
@@ -3543,11 +3475,13 @@
* * 0: A tensor.
* * 1: A tensor of the same {@link OperandType} and compatible dimensions
* with input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
MAXIMUM = 65,
@@ -3566,11 +3500,13 @@
* * 0: A tensor.
* * 1: A tensor of the same {@link OperandType} and compatible dimensions
* with input0.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input0 scale and zeroPoint.
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
MINIMUM = 66,
@@ -3589,8 +3525,6 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 29.
*/
NEG = 67,
@@ -3615,8 +3549,6 @@
*
* Outputs:
* * 0: A tensor of {@link OperandType::TENSOR_BOOL8}.
- *
- * Available since API level 29.
*/
NOT_EQUAL = 68,
@@ -3657,8 +3589,8 @@
* of the padding:
* output0.dimension[i] =
* padding[i, 0] + input0.dimension[i] + padding[i, 1]
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
PAD_V2 = 69,
@@ -3689,8 +3621,6 @@
*
* Outputs:
* * 0: An output tensor.
- *
- * Available since API level 29.
*/
POW = 70,
@@ -3728,8 +3658,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     * the scale and zeroPoint can be different from the input0 scale and zeroPoint.
*/
PRELU = 71,
@@ -3752,8 +3682,6 @@
* Outputs:
* * 0: The output tensor of same shape as input0, but with
* {@link OperandType::TENSOR_QUANT8_ASYMM}.
- *
- * Available since API level 29.
*/
QUANTIZE = 72,
@@ -3879,8 +3807,6 @@
* Outputs:
* * 0: A 2-D {@link OperandType::TENSOR_INT32} tensor with shape
* [batches, samples], containing the drawn samples.
- *
- * Available since API level 29.
*/
RANDOM_MULTINOMIAL = 74,
@@ -3906,8 +3832,6 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 29.
*/
REDUCE_ALL = 75,
@@ -3933,8 +3857,6 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 29.
*/
REDUCE_ANY = 76,
@@ -3962,8 +3884,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
REDUCE_MAX = 77,
@@ -3991,8 +3913,8 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
REDUCE_MIN = 78,
@@ -4018,8 +3940,6 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 29.
*/
REDUCE_PROD = 79,
@@ -4045,8 +3965,6 @@
*
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0.
- *
- * Available since API level 29.
*/
REDUCE_SUM = 80,
@@ -4064,7 +3982,7 @@
* interpolation.
*
* Supported tensor {@link OperandType}:
- * * {@link OperandType::TENSOR_FLOAT16} (since API level 29)
+ * * {@link OperandType::TENSOR_FLOAT16}
* * {@link OperandType::TENSOR_FLOAT32}
* * {@link OperandType::TENSOR_QUANT8_ASYMM}
*
@@ -4105,8 +4023,8 @@
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0. The output
* shape is [num_rois, out_height, out_width, depth].
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from the input0 scale and zeroPoint.
*/
ROI_ALIGN = 81,
@@ -4156,8 +4074,8 @@
* Outputs:
* * 0: A tensor of the same {@link OperandType} as input0. The output
* shape is [num_rois, out_height, out_width, depth].
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
ROI_POOLING = 82,
@@ -4175,8 +4093,6 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 29.
*/
RSQRT = 83,
@@ -4201,9 +4117,13 @@
* true) or input2 (if false).
* * 1: An input tensor of the same shape as input0.
* * 2: An input tensor of the same shape and type as input1.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scales and zeroPoint can be different from input1 scale and zeroPoint.
*
* Outputs:
* * 0: A tensor of the same type and shape as input1 and input2.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*
*/
SELECT = 84,
@@ -4222,8 +4142,6 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 29.
*/
SIN = 85,
@@ -4235,7 +4153,6 @@
* for each dimension. The size is specified as a 1-D tensor containing
* either size of a slice along corresponding dimension or -1. In the latter
* case, all the remaining elements in dimension are included in the slice.
- * Slice size in each dimension cannot be zero.
*
* A sum of begin offset and a size of a slice must not exceed size of a
* corresponding dimension.
@@ -4257,8 +4174,8 @@
*
* Outputs:
* * 0: An n-D tensor of the same type as the input containing the slice.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+     * its scale and zeroPoint have to be the same as the input0 scale and zeroPoint.
*/
SLICE = 86,
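A minimal plain-C++ sketch of how the size input above resolves, with -1 meaning "to the end of that dimension" (hypothetical helper, not part of the HAL):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Resolves the effective slice size per dimension and checks that
    // begin + size never exceeds the corresponding input dimension.
    std::vector<int32_t> resolveSliceSizes(const std::vector<int32_t>& dims,
                                           const std::vector<int32_t>& begin,
                                           const std::vector<int32_t>& size) {
        std::vector<int32_t> out(dims.size());
        for (size_t i = 0; i < dims.size(); ++i) {
            out[i] = (size[i] == -1) ? dims[i] - begin[i] : size[i];
            assert(begin[i] + out[i] <= dims[i]);
        }
        return out;
    }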
@@ -4282,8 +4199,8 @@
*
* Outputs:
* * 0 ~ (num_splits - 1): Resulting subtensors.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
SPLIT = 87,
@@ -4301,8 +4218,6 @@
*
* Outputs:
* * 0: The output tensor of same shape as input0.
- *
- * Available since API level 29.
*/
SQRT = 88,
@@ -4330,8 +4245,8 @@
*
* Outputs:
* * 0: A tiled tensor of the same {@link OperandType} and rank as `input`.
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
TILE = 89,
@@ -4357,10 +4272,10 @@
* Outputs:
* * 0: An n-D tensor of the same type as the input, containing the k
* largest elements along each last dimensional slice.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
* * 1: An n-D tensor of type {@link OperandType::TENSOR_INT32}
* containing the indices of values within the last dimension of input.
- *
- * Available since API level 29.
*/
TOPK_V2 = 90,
@@ -4374,7 +4289,7 @@
* The output dimensions are functions of the filter dimensions, stride, and
* padding.
*
- * Supported tensor {@link OperandCode} configurations:
+ * Supported tensor {@link OperandType} configurations:
* * 16 bit floating point:
* * * {@link OperandType::TENSOR_FLOAT16} for input, filter, output, and bias.
*
@@ -4406,7 +4321,7 @@
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter. For tensor of type
* {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
- * dimension (extraParams.channelQuant.channelDim) must be set to 0.
+ * dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
@@ -4443,7 +4358,7 @@
* [depth_out, filter_height, filter_width, depth_in], specifying the
* filter. For tensor of type
* {@link OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL} the channel
- * dimension (extraParams.channelQuant.channelDim) must be set to 0.
+ * dimension (SymmPerChannelQuantParams::channelDim) must be set to 0.
* * 2: A 1-D tensor, of shape [depth_out], specifying the bias. For input
* tensor of type {@link OperandType::TENSOR_FLOAT32} or
* {@link OperandType::TENSOR_FLOAT16}, the bias should be of the
@@ -4473,8 +4388,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, out_height, out_width, depth_out].
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint can be different from inputs' scale and zeroPoint.
*/
TRANSPOSE_CONV_2D = 91,
@@ -4584,8 +4499,6 @@
* A 3-D tensor of shape:
* If time-major: [max_time, batch_size, output_size]
* If batch-major: [batch_size, max_time, output_size]
- *
- * Available since API level 29.
*/
UNIDIRECTIONAL_SEQUENCE_LSTM = 92,
@@ -4641,8 +4554,6 @@
* it is set to 1, then the output has a shape [maxTime, batchSize,
* numUnits], otherwise the output has a shape [batchSize, maxTime,
* numUnits].
- *
- * Available since API level 29.
*/
UNIDIRECTIONAL_SEQUENCE_RNN = 93,
@@ -4696,8 +4607,8 @@
* Outputs:
* * 0: The output 4-D tensor, of shape
* [batches, new_height, new_width, depth].
- *
- * Available since API level 29.
+ * For a {@link OperandType::TENSOR_QUANT8_ASYMM} tensor,
+ * the scale and zeroPoint must be the same as input0.
*/
RESIZE_NEAREST_NEIGHBOR = 94,
diff --git a/neuralnetworks/1.2/types.t b/neuralnetworks/1.2/types.t
new file mode 100644
index 0000000..d197f6b
--- /dev/null
+++ b/neuralnetworks/1.2/types.t
@@ -0,0 +1,725 @@
+%% template file for generating types.hal.
+%% see frameworks/ml/nn/tools/api/README.md.
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.2;
+
+import @1.0::DataLocation;
+import @1.0::ErrorStatus;
+import @1.0::OperandLifeTime;
+import @1.0::OperandType;
+import @1.0::PerformanceInfo;
+import @1.1::OperationType;
+
+import android.hidl.safe_union@1.0::Monostate;
+
+enum Constant : uint32_t {
+ /**
+ * The byte size of the cache token.
+ */
+ BYTE_SIZE_OF_CACHE_TOKEN = 32,
+
+ /**
+ * The maximum number of files for each type of cache in compilation caching.
+ */
+ MAX_NUMBER_OF_CACHE_FILES = 32,
+};
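For illustration only (plain C++, hypothetical caller-side names): a mirror of the two caching constants above, sizing the cache token and bounding how many files each cache type may use.

    #include <array>
    #include <cstdint>

    constexpr uint32_t kByteSizeOfCacheToken = 32;    // BYTE_SIZE_OF_CACHE_TOKEN
    constexpr uint32_t kMaxNumberOfCacheFiles = 32;   // MAX_NUMBER_OF_CACHE_FILES

    // The cache token is a fixed 32-byte identifier.
    using CacheToken = std::array<uint8_t, kByteSizeOfCacheToken>;

    // No single cache type may be backed by more than 32 files.
    bool cacheFileCountValid(uint32_t numModelCacheFiles, uint32_t numDataCacheFiles) {
        return numModelCacheFiles <= kMaxNumberOfCacheFiles &&
               numDataCacheFiles <= kMaxNumberOfCacheFiles;
    }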
+
+enum OperandType : @1.0::OperandType {
+%insert Operand_1.2
+%insert OEMDeprecationAndOperandTypeRangeMaxComment
+};
+
+/**
+ * The range of operand values in the OperandType enum.
+ */
+enum OperandTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+%insert Operand_1.2_MAX
+ OEM_MIN = 10000,
+ OEM_MAX = 10001,
+ BASE_MAX = 0xFFFF,
+};
+
+/**
+ * Operation types.
+ *
+ * The type of an operation in a model.
+ */
+enum OperationType : int32_t {
+
+%insert Operation_1.0
+
+%insert Operation_1.1
+
+%insert Operation_1.2
+
+ /**
+ * DEPRECATED. Since NNAPI 1.2, extensions are the preferred alternative to
+ * OEM operation and data types.
+ *
+ * This operation is OEM specific. It should only be used for OEM
+ * applications.
+ */
+ OEM_OPERATION = @1.1::OperationType:OEM_OPERATION,
+ /* ADDING A NEW FUNDAMENTAL OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::FUNDAMENTAL_MAX.
+ */
+ /* ADDING A NEW OEM OPERATION REQUIRES UPDATING THE VALUE OF
+ * OperationTypeRange::OEM_MAX.
+ */
+};
+
+/**
+ * The range of values in the OperationType enum.
+ */
+enum OperationTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+%insert Operation_1.2_MAX
+ OEM_MIN = 10000,
+ OEM_MAX = 10000,
+ BASE_MAX = 0xFFFF,
+};
+
+/**
+ * Device types.
+ *
+ * The type of NNAPI device.
+ */
+enum DeviceType : int32_t {
+ // Leaving 0 unused as it means unknown type in NDK NNAPI. There is no
+ // HAL equivalent of unknown type and a 1.2 HAL implementation must belong
+ // to one of the categories below.
+ /** The device does not fall into any category below. */
+ OTHER = 1,
+ /** The device runs NNAPI models on single or multi-core CPU. */
+ CPU = 2,
+ /** The device can run NNAPI models and also accelerate graphics APIs such
+ * as OpenGL ES and Vulkan. */
+ GPU = 3,
+ /** Dedicated accelerator for Machine Learning workloads. */
+ ACCELERATOR = 4,
+};
+
+/**
+ * The capabilities of a driver.
+ *
+ * Performance of an operation comes from the type of its first operand.
+ * This represents performance for non extension operand types.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data but performing
+ * calculations with range and/or precision as low as that of the IEEE
+ * 754 16-bit floating-point format.
+ */
+ PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
+ PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;
+
+ /**
+ * Driver performance when operating on a particular data type.
+ * In the case of float32 data, this is used when the calculations
+ * are not relaxed.
+ */
+ struct OperandPerformance {
+ OperandType type;
+ PerformanceInfo info;
+ };
+
+ /**
+ * Performance by operand type. Must be sorted by OperandType.
+ * If a particular OperandType is not present in operandPerformance,
+ * its performance is treated as { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
+ */
+ vec<OperandPerformance> operandPerformance;
+};
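A plain-C++ sketch of the lookup rule stated above, using local mirror structs rather than the generated HIDL types: operandPerformance stays sorted by type, so a binary search applies, and an absent type falls back to { FLT_MAX, FLT_MAX }.

    #include <algorithm>
    #include <cfloat>
    #include <cstdint>
    #include <vector>

    // Local mirrors of the HAL types, for illustration only.
    struct PerformanceInfo { float execTime; float powerUsage; };
    struct OperandPerformance { int32_t type; PerformanceInfo info; };

    // perf must be sorted by type; a type not present is treated as worst-case.
    PerformanceInfo lookupPerformance(const std::vector<OperandPerformance>& perf,
                                      int32_t type) {
        auto it = std::lower_bound(perf.begin(), perf.end(), type,
                                   [](const OperandPerformance& p, int32_t t) {
                                       return p.type < t;
                                   });
        if (it != perf.end() && it->type == type) return it->info;
        return {FLT_MAX, FLT_MAX};
    }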
+
+/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ *
+ * Besides the values listed in {@link OperationType}, any value above
+ * {@link OperationTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperationType type;
+
+ /**
+ * Describes the table that contains the indexes of the inputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+ * Describes the table that contains the indexes of the outputs of the
+ * operation. The offset is the index in the operandIndexes table.
+ */
+ vec<uint32_t> outputs;
+};
+
+/**
+ * Parameters for TENSOR_QUANT8_SYMM_PER_CHANNEL operand.
+ */
+struct SymmPerChannelQuantParams {
+ /** Array of scaling values for each channel. Each value must be greater than zero. */
+ vec<float> scales;
+ /** Index of the channel dimension */
+ uint32_t channelDim;
+};
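For orientation, symmetric per-channel quantization uses a zero point of 0, so converting a stored 8-bit value back to a real number only needs the scale of its channel (the element's index along channelDim). A hedged standalone sketch; the helper name is illustrative and the zero-point-of-0 convention is an assumption drawn from the NNAPI definition of this operand type rather than from this file.

#include <cstdint>
#include <vector>

// Dequantize one TENSOR_QUANT8_SYMM_PER_CHANNEL element. 'channel' is the
// element's index along the operand's channelDim dimension.
float dequantizePerChannel(int8_t value, const std::vector<float>& scales, uint32_t channel) {
    return static_cast<float>(value) * scales[channel];  // zero point is 0 for symmetric quantization
}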
+
+/**
+ * Describes one operand of the model's graph.
+ */
+struct Operand {
+ /**
+ * The data type.
+ *
+ * Besides the values listed in {@link OperandType}, any value above
+ * {@link OperandTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperandType type;
+
+ /**
+ * Dimensions of the operand.
+ *
+ * For a scalar operand, dimensions.size() must be 0.
+ *
+ * A tensor operand with all dimensions specified has "fully
+ * specified" dimensions. Whenever possible (i.e., whenever the
+ * dimensions are known at model construction time), a tensor
+ * operand should have (but is not required to have) fully
+ * specified dimensions, in order to enable the best possible
+ * performance.
+ *
+ * If a tensor operand's dimensions are not fully specified, the
+ * dimensions of the operand are deduced from the operand
+ * dimensions and values of the operation for which that operand
+ * is an output.
+ *
+ * In the following situations, a tensor operand's dimensions must
+ * be fully specified:
+ *
+ * . The operand has lifetime CONSTANT_COPY or
+ * CONSTANT_REFERENCE.
+ *
+ * . The operand has lifetime MODEL_INPUT. Fully
+ * specified dimensions must either be present in the
+ * Operand or they must be provided in the corresponding
+ * RequestArgument.
+ * EXCEPTION: If the input is optional and omitted
+ * (by setting the hasNoValue field of the corresponding
+ * RequestArgument to true) then it need not have fully
+ * specified dimensions.
+ *
+ * A tensor operand with some number of unspecified dimensions is
+ * represented by setting each unspecified dimension to 0.
+ *
+ * A tensor operand with unspecified rank is represented by providing
+ * an empty dimensions vector.
+ */
+ vec<uint32_t> dimensions;
+
+ /**
+ * The number of times this operand appears as an operation input.
+ *
+ * (For example, if this operand appears once in one operation's
+ * input list, and three times in another operation's input list,
+ * then numberOfConsumers = 4.)
+ */
+ uint32_t numberOfConsumers;
+
+ /**
+ * Quantized scale of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
+ * TENSOR_INT32.
+ */
+ float scale;
+
+ /**
+ * Quantized zero-point offset of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ */
+ int32_t zeroPoint;
+
+ /**
+ * How the operand is used.
+ */
+ OperandLifeTime lifetime;
+
+ /**
+ * Where to find the data for this operand.
+ * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
+ * NO_VALUE:
+ * - All the fields must be 0.
+ * If the lifetime is CONSTANT_COPY:
+ * - location.poolIndex is 0.
+ * - location.offset is the offset in bytes into Model.operandValues.
+ * - location.length is set.
+ * If the lifetime is CONSTANT_REFERENCE:
+ * - location.poolIndex is set.
+ * - location.offset is the offset in bytes into the specified pool.
+ * - location.length is set.
+ */
+ DataLocation location;
+
+ /**
+ * Additional parameters specific to a particular operand type.
+ */
+ safe_union ExtraParams {
+ /**
+ * No additional parameters.
+ */
+ Monostate none;
+
+ /**
+ * Symmetric per-channel quantization parameters.
+ *
+ * Only applicable to operands of type TENSOR_QUANT8_SYMM_PER_CHANNEL.
+ */
+ SymmPerChannelQuantParams channelQuant;
+
+ /**
+ * Extension operand parameters.
+ *
+ * The framework treats this as an opaque data blob.
+ * The format is up to individual extensions.
+ */
+ vec<uint8_t> extension;
+ } extraParams;
+};
+
+/**
+ * A Neural Network Model.
+ *
+ * This includes not only the execution graph, but also constant data such as
+ * weights or scalars added at construction time. The only information that
+ * may not be known is the shape of the input tensors.
+ */
+struct Model {
+ /**
+ * All operands included in the model.
+ */
+ vec<Operand> operands;
+
+ /**
+ * All operations included in the model.
+ *
+ * The operations are sorted into execution order. Every operand
+ * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
+ * written before it is read.
+ */
+ vec<Operation> operations;
+
+ /**
+ * Input indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> inputIndexes;
+
+ /**
+ * Output indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> outputIndexes;
+
+ /**
+ * A byte buffer containing operand data that were copied into the model.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_COPY.
+ */
+ vec<uint8_t> operandValues;
+
+ /**
+ * A collection of shared memory pools containing operand values.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_REFERENCE.
+ */
+ vec<memory> pools;
+
+ /**
+ * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or
+ * precision as low as that of the IEEE 754 16-bit floating-point format.
+ * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the
+ * range and precision of the IEEE 754 32-bit floating-point format.
+ */
+ bool relaxComputationFloat32toFloat16;
+
+ /**
+ * The mapping between extension names and prefixes of operand and
+ * operation type values.
+ *
+ * An operand or operation whose numeric type value is above
+ * {@link OperandTypeRange::BASE_MAX} or
+ * {@link OperationTypeRange::BASE_MAX} respectively should be interpreted
+ * as an extension operand or operation. The low
+ * {@link Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the value
+ * correspond to the type ID within the extension and the high
+ * {@link Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode
+ * the "prefix", which maps uniquely to the extension name.
+ *
+ * For example, if a model contains an operation whose value is
+ * 0xAAAABBBB and extensionNameToPrefix contains an entry with
+ * prefix=0xAAAA and name="vendor.test.test_extension", then
+ * the operation should be interpreted as the operation 0xBBBB
+ * of the extension named vendor.test.test_extension.
+ *
+ * This is a one-to-one correspondence. That is, there must be at most one
+ * prefix corresponding to each extension name and at most one extension
+ * name corresponding to each prefix.
+ */
+ vec<ExtensionNameAndPrefix> extensionNameToPrefix;
+
+ /**
+ * A correspondence between an extension name and a prefix of operand and
+ * operation type values.
+ */
+ struct ExtensionNameAndPrefix {
+ /**
+ * The extension name.
+ *
+ * See {@link Extension::name} for the format specification.
+ */
+ string name;
+
+ /**
+ * The unique extension identifier within the model.
+ *
+ * See {@link Model::extensionNameToPrefix}.
+ */
+ uint16_t prefix;
+ };
+
+ /**
+ * Numeric values of extension operand and operation types have the
+ * following structure:
+ * - 16 high bits represent the "prefix", which corresponds uniquely to the
+ * extension name.
+ * - 16 low bits represent the type ID within the extension.
+ */
+ enum ExtensionTypeEncoding : uint8_t {
+ HIGH_BITS_PREFIX = 16,
+ LOW_BITS_TYPE = 16,
+ };
+};
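The prefix encoding above amounts to a 16-bit shift and mask. Here is a standalone C++ sketch of the decode path; the function and constant names are illustrative, not the generated API.

#include <cstdint>

constexpr uint32_t kLowBitsType = 16;  // mirrors ExtensionTypeEncoding::LOW_BITS_TYPE

// A type value above BASE_MAX (0xFFFF) denotes an extension type.
constexpr bool isExtensionType(uint32_t type) { return type > 0xFFFF; }
constexpr uint16_t extensionPrefix(uint32_t type) {
    return static_cast<uint16_t>(type >> kLowBitsType);
}
constexpr uint16_t typeWithinExtension(uint32_t type) {
    return static_cast<uint16_t>(type & 0xFFFF);
}

// The 0xAAAABBBB example from the comment above decodes as expected.
static_assert(extensionPrefix(0xAAAABBBB) == 0xAAAA, "prefix is the high 16 bits");
static_assert(typeWithinExtension(0xAAAABBBB) == 0xBBBB, "type ID is the low 16 bits");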
+
+/**
+ * Describes the shape information of an output operand after execution.
+ */
+struct OutputShape {
+ /**
+ * Dimensions of the operand.
+ */
+ vec<uint32_t> dimensions;
+
+ /**
+ * Whether the provided buffer size is sufficient for the output.
+ */
+ bool isSufficient;
+};
+
+/**
+ * Specifies whether or not to measure timing information during execution.
+ */
+enum MeasureTiming : int32_t {
+ NO = 0,
+ YES = 1,
+};
+
+/**
+ * Timing information measured during execution. Each time is a duration from
+ * the beginning of some task to the end of that task, including time when that
+ * task is not active (for example, preempted by some other task, or
+ * waiting for some resource to become available).
+ *
+ * Times are measured in microseconds.
+ * When a time is not available, it must be reported as UINT64_MAX.
+ */
+struct Timing {
+ /** Execution time on device (not driver, which runs on host processor). */
+ uint64_t timeOnDevice;
+ /** Execution time in driver (including time on device). */
+ uint64_t timeInDriver;
+};
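A quick check for the "time not available" convention follows; this is a standalone sketch with a local stand-in struct, similar in spirit to the badTiming() helper referenced by the VTS tests elsewhere in this change.

#include <cstdint>

struct Timing { uint64_t timeOnDevice; uint64_t timeInDriver; };  // stand-in for the HIDL struct

// Both fields are UINT64_MAX when measurement was not requested, not
// supported, or the execution failed.
bool timingUnavailable(const Timing& timing) {
    return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
}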
+
+/**
+ * FmqRequestDatum is a single element of a serialized representation of an
+ * execution request (a {@link @1.0::Request} object and a {@link MeasureTiming}
+ * value) which is sent across FastMessageQueue.
+ *
+ * The serialized representation for a particular execution is referred to later
+ * in these descriptions as a 'packet'.
+ *
+ * FastMessageQueue can only pass HIDL-defined types that do not involve nested
+ * buffers, handles, or interfaces.
+ *
+ * The request is serialized as follows:
+ * 1) 'packetInformation'
+ * 2) For each input operand:
+ * 2.1) 'inputOperandInformation'
+ * 2.2) For each dimension element of the operand:
+ * 2.2.1) 'inputOperandDimensionValue'
+ * 3) For each output operand:
+ * 3.1) 'outputOperandInformation'
+ * 3.2) For each dimension element of the operand:
+ * 3.2.1) 'outputOperandDimensionValue'
+ * 4) For each pool:
+ * 4.1) 'poolIdentifier'
+ * 5) 'measureTiming'
+ */
+safe_union FmqRequestDatum {
+ /**
+ * Type to describe the high-level layout of the packet.
+ */
+ struct PacketInformation {
+ /**
+ * How many elements the packet contains, including the
+ * "packetInformation" datum.
+ */
+ uint32_t packetSize;
+
+ /**
+ * Number of input operands.
+ */
+ uint32_t numberOfInputOperands;
+
+ /**
+ * Number of output operands.
+ */
+ uint32_t numberOfOutputOperands;
+
+ /**
+ * Number of pool identifiers.
+ */
+ uint32_t numberOfPools;
+ };
+
+ /**
+ * Type representing the information for each operand.
+ */
+ struct OperandInformation {
+ /**
+ * If true, the argument does not have a value. This can be used for
+ * operations that take optional arguments. If true, the fields of
+ * 'location' are set to 0, 'numberOfDimensions' is set to 0, and the
+ * dimensions information is omitted from the serialization.
+ */
+ bool hasNoValue;
+
+ /**
+ * The location within one of the memory pools passed in the Request.
+ */
+ DataLocation location;
+
+ /**
+ * Number of subsequent elements that belong to the dimensions vector.
+ */
+ uint32_t numberOfDimensions;
+ };
+
+ /**
+ * packetInformation is the first element of the packet and describes the
+ * remainder of the packet.
+ */
+ PacketInformation packetInformation;
+
+ /**
+ * Information for each input operand.
+ */
+ OperandInformation inputOperandInformation;
+
+ /**
+ * Element of the dimensions vector.
+ */
+ uint32_t inputOperandDimensionValue;
+
+ /**
+ * Information for each output operand.
+ */
+ OperandInformation outputOperandInformation;
+
+ /**
+ * Element of the dimensions vector.
+ */
+ uint32_t outputOperandDimensionValue;
+
+ /**
+ * Unique identifier for a pool.
+ *
+ * A {@link @1.0::Request} passes across one or more pools of shared memory
+ * for the inputs and outputs of an execution. However, these memory pools
+ * are not able to be sent across FastMessageQueue directly. Instead, the
+ * producing side of the FMQ represents each different pool with a unique
+ * identifier, and sends this identifier across the FMQ. Whenever the
+ * consuming side of the FMQ needs the memory corresponding to this unique
+ * identifier, it can pass the identifier to
+ * {@link IBurstCallback::getMemories} to retrieve the memory. Although this
+ * HIDL Binder call is expensive compared to communication across FMQ, it is
+ * only needed in the cases when the consumer does not recognize the unique
+ * identifier.
+ */
+ int32_t poolIdentifier;
+
+ /**
+ * Specifies whether or not to measure duration of the execution. The
+ * duration runs from the time the driver dequeues the request from a
+ * FastMessageQueue to the time the driver enqueues results to a
+ * FastMessageQueue.
+ */
+ MeasureTiming measureTiming;
+};
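The serialization order in the comment above determines the packet size exactly: one packetInformation datum, one operandInformation datum plus one datum per dimension for every input and output, one poolIdentifier per pool, and a trailing measureTiming datum. A standalone sketch of that arithmetic (inputs and outputs are represented here only by their serialized dimension counts, which are 0 for omitted optional operands):

#include <cstddef>
#include <cstdint>
#include <vector>

size_t fmqRequestPacketSize(const std::vector<uint32_t>& inputDimCounts,
                            const std::vector<uint32_t>& outputDimCounts,
                            size_t numPools) {
    size_t size = 1;  // packetInformation
    for (uint32_t dims : inputDimCounts) size += 1 + dims;   // inputOperandInformation + dims
    for (uint32_t dims : outputDimCounts) size += 1 + dims;  // outputOperandInformation + dims
    size += numPools;                                        // one poolIdentifier per pool
    size += 1;                                               // measureTiming
    return size;                                             // equals PacketInformation::packetSize
}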
+
+/**
+ * FmqResultDatum is a single element of a serialized representation of the
+ * values returned from an execution ({@link @1.0::ErrorStatus},
+ * vec<{@link OutputShape}>, and {@link Timing}) which is returned via
+ * FastMessageQueue.
+ *
+ * The serialized representation for a particular execution is referred to later
+ * in these descriptions as a 'packet'.
+ *
+ * FastMessageQueue can only pass HIDL-defined types that do not involve nested
+ * buffers, handles, or interfaces.
+ *
+ * The execution return values ({@link @1.0::ErrorStatus} and
+ * vec<{@link OutputShape}>) are serialized as follows:
+ * 1) 'packetInformation'
+ * 2) For each returned operand:
+ * 2.1) 'operandInformation'
+ * 2.2) For each dimension element of the operand:
+ * 2.2.1) 'operandDimensionValue'
+ * 3) 'executionTiming'
+ */
+safe_union FmqResultDatum {
+ /**
+ * Type to describe the high-level layout of the packet.
+ */
+ struct PacketInformation {
+ /**
+ * How many elements the packet contains, including the
+ * "packetInformation" datum.
+ */
+ uint32_t packetSize;
+
+ /**
+ * Status of the execution.
+ */
+ ErrorStatus errorStatus;
+
+ /**
+ * Number of returned operands.
+ */
+ uint32_t numberOfOperands;
+ };
+
+ /**
+ * Type representing the information for each operand.
+ */
+ struct OperandInformation {
+ /**
+ * Indicates whether the operand's output buffer is large enough to
+ * store the operand's result data.
+ */
+ bool isSufficient;
+
+ /**
+ * Number of subsequent elements that belong to the dimensions vector.
+ */
+ uint32_t numberOfDimensions;
+ };
+
+ /**
+ * packetInformation is the first element of the packet and describes the
+ * remainder of the packet. It additionally includes the status of the
+ * execution.
+ */
+ PacketInformation packetInformation;
+
+ /**
+ * Information for each returned operand.
+ */
+ OperandInformation operandInformation;
+
+ /**
+ * Element of the dimensions vector.
+ */
+ uint32_t operandDimensionValue;
+
+ /**
+ * Duration of execution. Unless measurement was requested and execution
+ * succeeds, all times must be reported as UINT64_MAX. A driver may choose
+ * to report any time as UINT64_MAX, indicating that measurement is not
+ * available.
+ */
+ Timing executionTiming;
+};
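The result packet follows the same pattern in the other direction; a companion sketch of its element count (one packetInformation datum, one operandInformation datum plus one datum per dimension for every returned operand, and a trailing executionTiming datum):

#include <cstddef>
#include <cstdint>
#include <vector>

size_t fmqResultPacketSize(const std::vector<uint32_t>& operandDimCounts) {
    size_t size = 1;  // packetInformation (which also carries errorStatus)
    for (uint32_t dims : operandDimCounts) size += 1 + dims;
    return size + 1;  // executionTiming
}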
+
+/**
+ * Information about an extension.
+ */
+struct Extension {
+ /**
+ * The extension name.
+ *
+ * The name must consist of lowercase Latin letters, numbers, periods, and
+ * underscores. The name must contain at least one period.
+ *
+ * The name must start with the reverse domain name of the vendor.
+ *
+ * Example: com.google.test_extension
+ */
+ string name;
+
+ /**
+ * Information about an extension operand type.
+ */
+ struct OperandTypeInformation {
+ /**
+ * The extension operand type.
+ */
+ uint16_t type;
+
+ /**
+ * Indicates whether the extension operand type represents a tensor or
+ * a scalar.
+ */
+ bool isTensor;
+
+ /**
+ * The byte size of the operand (if scalar) or of a single element (if
+ * tensor).
+ */
+ uint32_t byteSize;
+ };
+
+ /**
+ * Information about operand types defined by the extension.
+ */
+ vec<OperandTypeInformation> operandTypes;
+};
diff --git a/neuralnetworks/1.2/vts/functional/Android.bp b/neuralnetworks/1.2/vts/functional/Android.bp
index 40ca809..bdca0e9 100644
--- a/neuralnetworks/1.2/vts/functional/Android.bp
+++ b/neuralnetworks/1.2/vts/functional/Android.bp
@@ -14,16 +14,35 @@
// limitations under the License.
//
-cc_defaults {
- name: "VtsHalNeuralNetworksV1_2TargetTestDefaults",
+cc_library_static {
+ name: "VtsHalNeuralNetworksV1_2Callbacks",
+ defaults: ["VtsHalTargetTestDefaults"],
+ export_include_dirs: ["include"],
+ srcs: [
+ "Callbacks.cpp",
+ ],
+ static_libs: [
+ "android.hardware.neuralnetworks@1.0",
+ "android.hardware.neuralnetworks@1.1",
+ "android.hardware.neuralnetworks@1.2",
+ ],
+ header_libs: [
+ "libbase_headers",
+ ]
+}
+
+cc_test {
+ name: "VtsHalNeuralnetworksV1_2TargetTest",
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
+ "BasicTests.cpp",
+ "CompilationCachingTests.cpp",
+ "GeneratedTestHarness.cpp",
"TestAssertions.cpp",
"ValidateModel.cpp",
"ValidateRequest.cpp",
+ "ValidateBurst.cpp",
"VtsHalNeuralnetworks.cpp",
- "Callbacks.cpp",
- "GeneratedTestHarness.cpp",
],
local_include_dirs: ["include"],
shared_libs: [
@@ -34,6 +53,7 @@
"android.hardware.neuralnetworks@1.0",
"android.hardware.neuralnetworks@1.1",
"android.hardware.neuralnetworks@1.2",
+ "android.hardware.neuralnetworks@1.3",
"android.hidl.allocator@1.0",
"android.hidl.memory@1.0",
"libgmock",
@@ -41,42 +61,15 @@
"libneuralnetworks_generated_test_harness",
"libneuralnetworks_utils",
"VtsHalNeuralNetworksV1_0_utils",
+ "VtsHalNeuralNetworksV1_2Callbacks",
+ ],
+ whole_static_libs: [
+ "neuralnetworks_generated_V1_0_example",
+ "neuralnetworks_generated_V1_1_example",
+ "neuralnetworks_generated_V1_2_example",
],
header_libs: [
"libneuralnetworks_headers",
],
- test_suites: ["general-tests"],
-}
-
-cc_test {
- name: "VtsHalNeuralnetworksV1_2TargetTest",
- defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
- srcs: [
- "BasicTests.cpp",
- "CompilationCachingTests.cpp",
- "ValidateBurst.cpp",
- ],
- whole_static_libs: [
- "neuralnetworks_generated_V1_0_example",
- "neuralnetworks_generated_V1_1_example",
- "neuralnetworks_generated_V1_2_example",
- ],
-}
-
-cc_test {
- name: "PresubmitHalNeuralnetworksV1_2TargetTest",
- defaults: ["VtsHalNeuralNetworksV1_2TargetTestDefaults"],
- srcs: [
- "BasicTests.cpp",
- "CompilationCachingTests.cpp",
- "ValidateBurst.cpp",
- ],
- whole_static_libs: [
- "neuralnetworks_generated_V1_0_example",
- "neuralnetworks_generated_V1_1_example",
- "neuralnetworks_generated_V1_2_example",
- ],
- cflags: [
- "-DPRESUBMIT_NOT_VTS",
- ],
+ test_suites: ["general-tests", "vts-core"],
}
diff --git a/neuralnetworks/1.2/vts/functional/BasicTests.cpp b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
index 8f95b96..8e82c53 100644
--- a/neuralnetworks/1.2/vts/functional/BasicTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/BasicTests.cpp
@@ -25,17 +25,17 @@
using V1_0::PerformanceInfo;
// create device test
-TEST_F(NeuralnetworksHidlTest, CreateDevice) {}
+TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
// status test
-TEST_F(NeuralnetworksHidlTest, StatusTest) {
+TEST_P(NeuralnetworksHidlTest, StatusTest) {
Return<DeviceStatus> status = kDevice->getStatus();
ASSERT_TRUE(status.isOk());
EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
}
// initialization
-TEST_F(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
using OperandPerformance = Capabilities::OperandPerformance;
Return<void> ret = kDevice->getCapabilities_1_2([](ErrorStatus status,
const Capabilities& capabilities) {
@@ -60,7 +60,7 @@
}
// device version test
-TEST_F(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
+TEST_P(NeuralnetworksHidlTest, GetDeviceVersionStringTest) {
Return<void> ret =
kDevice->getVersionString([](ErrorStatus status, const hidl_string& version) {
EXPECT_EQ(ErrorStatus::NONE, status);
@@ -70,7 +70,7 @@
}
// device type test
-TEST_F(NeuralnetworksHidlTest, GetDeviceTypeTest) {
+TEST_P(NeuralnetworksHidlTest, GetDeviceTypeTest) {
Return<void> ret = kDevice->getType([](ErrorStatus status, DeviceType type) {
EXPECT_EQ(ErrorStatus::NONE, status);
EXPECT_TRUE(type == DeviceType::OTHER || type == DeviceType::CPU ||
@@ -80,7 +80,7 @@
}
// device supported extensions test
-TEST_F(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) {
+TEST_P(NeuralnetworksHidlTest, GetDeviceSupportedExtensionsTest) {
Return<void> ret = kDevice->getSupportedExtensions(
[](ErrorStatus status, const hidl_vec<Extension>& extensions) {
EXPECT_EQ(ErrorStatus::NONE, status);
@@ -101,7 +101,7 @@
}
// getNumberOfCacheFilesNeeded test
-TEST_F(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
+TEST_P(NeuralnetworksHidlTest, getNumberOfCacheFilesNeeded) {
Return<void> ret = kDevice->getNumberOfCacheFilesNeeded(
[](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
EXPECT_EQ(ErrorStatus::NONE, status);
diff --git a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
index bb46e06..2130a76 100644
--- a/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
+++ b/neuralnetworks/1.2/vts/functional/CompilationCachingTests.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include <android-base/logging.h>
+#include <fcntl.h>
#include <ftw.h>
#include <gtest/gtest.h>
#include <hidlmemory/mapping.h>
@@ -37,11 +38,11 @@
// Forward declaration of the mobilenet generated test models in
// frameworks/ml/nn/runtime/test/generated/.
namespace generated_tests::mobilenet_224_gender_basic_fixed {
-const ::test_helper::TestModel& get_test_model();
+const test_helper::TestModel& get_test_model();
} // namespace generated_tests::mobilenet_224_gender_basic_fixed
namespace generated_tests::mobilenet_quantized {
-const ::test_helper::TestModel& get_test_model();
+const test_helper::TestModel& get_test_model();
} // namespace generated_tests::mobilenet_quantized
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
@@ -53,13 +54,13 @@
namespace float32_model {
-constexpr auto get_test_model = ::generated_tests::mobilenet_224_gender_basic_fixed::get_test_model;
+constexpr auto get_test_model = generated_tests::mobilenet_224_gender_basic_fixed::get_test_model;
} // namespace float32_model
namespace quant8_model {
-constexpr auto get_test_model = ::generated_tests::mobilenet_quantized::get_test_model;
+constexpr auto get_test_model = generated_tests::mobilenet_quantized::get_test_model;
} // namespace quant8_model
@@ -217,12 +218,13 @@
} // namespace
// Tag for the compilation caching tests.
-class CompilationCachingTestBase : public NeuralnetworksHidlTest {
+class CompilationCachingTestBase : public testing::Test {
protected:
- CompilationCachingTestBase(OperandType type) : kOperandType(type) {}
+ CompilationCachingTestBase(sp<IDevice> device, OperandType type)
+ : kDevice(std::move(device)), kOperandType(type) {}
void SetUp() override {
- NeuralnetworksHidlTest::SetUp();
+ testing::Test::SetUp();
ASSERT_NE(kDevice.get(), nullptr);
// Create cache directory. The cache directory and a temporary cache file is always created
@@ -274,7 +276,7 @@
};
nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
}
- NeuralnetworksHidlTest::TearDown();
+ testing::Test::TearDown();
}
// Model and examples creators. According to kOperandType, the following methods will return
@@ -398,16 +400,21 @@
uint32_t mNumDataCache;
uint32_t mIsCachingSupported;
+ const sp<IDevice> kDevice;
// The primary data type of the testModel.
const OperandType kOperandType;
};
+using CompilationCachingTestParam = std::tuple<NamedDevice, OperandType>;
+
// A parameterized fixture of CompilationCachingTestBase. Every test will run twice, with the first
// pass running with float32 models and the second pass running with quant8 models.
class CompilationCachingTest : public CompilationCachingTestBase,
- public testing::WithParamInterface<OperandType> {
+ public testing::WithParamInterface<CompilationCachingTestParam> {
protected:
- CompilationCachingTest() : CompilationCachingTestBase(GetParam()) {}
+ CompilationCachingTest()
+ : CompilationCachingTestBase(getData(std::get<NamedDevice>(GetParam())),
+ std::get<OperandType>(GetParam())) {}
};
TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
@@ -1192,16 +1199,30 @@
}
}
+static const auto kNamedDeviceChoices = testing::ValuesIn(getNamedDevices());
static const auto kOperandTypeChoices =
testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);
-INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest, kOperandTypeChoices);
+std::string printCompilationCachingTest(
+ const testing::TestParamInfo<CompilationCachingTestParam>& info) {
+ const auto& [namedDevice, operandType] = info.param;
+ const std::string type = (operandType == OperandType::TENSOR_FLOAT32 ? "float32" : "quant8");
+ return gtestCompliantName(getName(namedDevice) + "_" + type);
+}
+
+INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest,
+ testing::Combine(kNamedDeviceChoices, kOperandTypeChoices),
+ printCompilationCachingTest);
+
+using CompilationCachingSecurityTestParam = std::tuple<NamedDevice, OperandType, uint32_t>;
class CompilationCachingSecurityTest
: public CompilationCachingTestBase,
- public testing::WithParamInterface<std::tuple<OperandType, uint32_t>> {
+ public testing::WithParamInterface<CompilationCachingSecurityTestParam> {
protected:
- CompilationCachingSecurityTest() : CompilationCachingTestBase(std::get<0>(GetParam())) {}
+ CompilationCachingSecurityTest()
+ : CompilationCachingTestBase(getData(std::get<NamedDevice>(GetParam())),
+ std::get<OperandType>(GetParam())) {}
void SetUp() {
CompilationCachingTestBase::SetUp();
@@ -1291,7 +1312,7 @@
}
}
- const uint32_t kSeed = std::get<1>(GetParam());
+ const uint32_t kSeed = std::get<uint32_t>(GetParam());
std::mt19937 generator;
};
@@ -1338,7 +1359,16 @@
});
}
+std::string printCompilationCachingSecurityTest(
+ const testing::TestParamInfo<CompilationCachingSecurityTestParam>& info) {
+ const auto& [namedDevice, operandType, seed] = info.param;
+ const std::string type = (operandType == OperandType::TENSOR_FLOAT32 ? "float32" : "quant8");
+ return gtestCompliantName(getName(namedDevice) + "_" + type + "_" + std::to_string(seed));
+}
+
INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
- testing::Combine(kOperandTypeChoices, testing::Range(0U, 10U)));
+ testing::Combine(kNamedDeviceChoices, kOperandTypeChoices,
+ testing::Range(0U, 10U)),
+ printCompilationCachingSecurityTest);
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index a2d3792..aacb385 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -33,6 +33,7 @@
#include <gtest/gtest.h>
#include <algorithm>
+#include <chrono>
#include <iostream>
#include <numeric>
@@ -190,7 +191,8 @@
}
static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
const sp<IPreparedModel>& preparedModel) {
- return ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
+ return android::nn::ExecutionBurstController::create(preparedModel,
+ std::chrono::microseconds{0});
}
enum class Executor { ASYNC, SYNC, BURST };
@@ -254,8 +256,10 @@
}
// execute burst
- std::tie(executionStatus, outputShapes, timing) =
+ int n;
+ std::tie(n, outputShapes, timing, std::ignore) =
controller->compute(request, measure, keys);
+ executionStatus = nn::convertResultCodeToErrorStatus(n);
break;
}
@@ -371,6 +375,20 @@
EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
}
+void GeneratedTestBase::SetUp() {
+ testing::TestWithParam<GeneratedTestParam>::SetUp();
+ ASSERT_NE(kDevice, nullptr);
+}
+
+std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
+ return TestModelManager::get().getTestModels(filter);
+}
+
+std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
+ const auto& [namedDevice, namedModel] = info.param;
+ return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
+}
+
// Tag for the generated tests
class GeneratedTest : public GeneratedTestBase {};
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
index 0b8b917..dfc980c 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
@@ -22,34 +22,43 @@
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <functional>
#include <vector>
+#include "1.0/Utils.h"
#include "TestHarness.h"
#include "VtsHalNeuralnetworks.h"
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
-class GeneratedTestBase
- : public NeuralnetworksHidlTest,
- public testing::WithParamInterface<test_helper::TestModelManager::TestParam> {
+using NamedModel = Named<const test_helper::TestModel*>;
+using GeneratedTestParam = std::tuple<NamedDevice, NamedModel>;
+
+class GeneratedTestBase : public testing::TestWithParam<GeneratedTestParam> {
protected:
- const test_helper::TestModel& kTestModel = *GetParam().second;
+ void SetUp() override;
+ const sp<IDevice> kDevice = getData(std::get<NamedDevice>(GetParam()));
+ const test_helper::TestModel& kTestModel = *getData(std::get<NamedModel>(GetParam()));
};
-#define INSTANTIATE_GENERATED_TEST(TestSuite, filter) \
- INSTANTIATE_TEST_SUITE_P( \
- TestGenerated, TestSuite, \
- testing::ValuesIn(::test_helper::TestModelManager::get().getTestModels(filter)), \
- [](const auto& info) { return info.param.first; })
+using FilterFn = std::function<bool(const test_helper::TestModel&)>;
+std::vector<NamedModel> getNamedModels(const FilterFn& filter);
+
+std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info);
+
+#define INSTANTIATE_GENERATED_TEST(TestSuite, filter) \
+ INSTANTIATE_TEST_SUITE_P(TestGenerated, TestSuite, \
+ testing::Combine(testing::ValuesIn(getNamedDevices()), \
+ testing::ValuesIn(getNamedModels(filter))), \
+ printGeneratedTest)
// Tag for the validation tests, instantiated in VtsHalNeuralnetworks.cpp.
// TODO: Clean up the hierarchy for ValidationTest.
class ValidationTest : public GeneratedTestBase {};
-Model createModel(const ::test_helper::TestModel& testModel);
+Model createModel(const test_helper::TestModel& testModel);
void PrepareModel(const sp<IDevice>& device, const Model& model, sp<IPreparedModel>* preparedModel);
void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel,
- const ::test_helper::TestModel& testModel, bool testDynamicOutputShape);
+ const test_helper::TestModel& testModel, bool testDynamicOutputShape);
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
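For readers tracking the gtest migration in this hunk: INSTANTIATE_GENERATED_TEST now binds a suite to the cross product of registered device instances and filtered test models. A hedged usage sketch follows; MyGeneratedTest and the always-true filter are illustrative, not part of the change.

#include <gtest/gtest.h>
#include "GeneratedTestHarness.h"

namespace android::hardware::neuralnetworks::V1_2::vts::functional {

class MyGeneratedTest : public GeneratedTestBase {};  // hypothetical suite

TEST_P(MyGeneratedTest, DeviceAndModelAreBound) {
    // kDevice and kTestModel are populated from GetParam() by GeneratedTestBase.
    ASSERT_NE(kDevice, nullptr);
}

// Instantiated once per (named device, named model) pair; the lambda filters models.
INSTANTIATE_GENERATED_TEST(MyGeneratedTest,
                           [](const test_helper::TestModel&) { return true; });

}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional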
diff --git a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
index c02d020..416744f 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateBurst.cpp
@@ -26,6 +26,7 @@
#include "Utils.h"
#include <android-base/logging.h>
+#include <chrono>
#include <cstring>
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
@@ -64,9 +65,9 @@
// create FMQ objects
auto [fmqRequestChannel, fmqRequestDescriptor] =
- RequestChannelSender::create(kExecutionBurstChannelLength, /*blocking=*/true);
+ RequestChannelSender::create(kExecutionBurstChannelLength);
auto [fmqResultChannel, fmqResultDescriptor] =
- ResultChannelReceiver::create(resultChannelLength, /*blocking=*/true);
+ ResultChannelReceiver::create(resultChannelLength, std::chrono::microseconds{0});
ASSERT_NE(nullptr, fmqRequestChannel.get());
ASSERT_NE(nullptr, fmqResultChannel.get());
ASSERT_NE(nullptr, fmqRequestDescriptor);
@@ -262,7 +263,7 @@
}));
// serialize the request
- const auto serialized = ::android::nn::serialize(request, MeasureTiming::YES, slots);
+ const auto serialized = android::nn::serialize(request, MeasureTiming::YES, slots);
// validations
removeDatumTest(sender.get(), receiver.get(), serialized);
@@ -293,13 +294,15 @@
}
// collect serialized result by running regular burst
- const auto [statusRegular, outputShapesRegular, timingRegular] =
+ const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
controllerRegular->compute(request, MeasureTiming::NO, keys);
+ const ErrorStatus statusRegular = nn::convertResultCodeToErrorStatus(nRegular);
+ EXPECT_FALSE(fallbackRegular);
// skip test if regular burst output isn't useful for testing a failure
// caused by having too small of a length for the result FMQ
const std::vector<FmqResultDatum> serialized =
- ::android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
+ android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
if (statusRegular != ErrorStatus::NONE ||
serialized.size() <= kExecutionBurstChannelSmallLength) {
return;
@@ -307,11 +310,13 @@
// by this point, execution should fail because the result channel isn't
// large enough to return the serialized result
- const auto [statusSmall, outputShapesSmall, timingSmall] =
+ const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
controllerSmall->compute(request, MeasureTiming::NO, keys);
+ const ErrorStatus statusSmall = nn::convertResultCodeToErrorStatus(nSmall);
EXPECT_NE(ErrorStatus::NONE, statusSmall);
EXPECT_EQ(0u, outputShapesSmall.size());
EXPECT_TRUE(badTiming(timingSmall));
+ EXPECT_FALSE(fallbackSmall);
}
static bool isSanitized(const FmqResultDatum& datum) {
diff --git a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
index 5c52de5..2d83b81 100644
--- a/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/1.2/vts/functional/ValidateRequest.cpp
@@ -16,6 +16,7 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
+#include <chrono>
#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
#include "ExecutionBurstController.h"
@@ -94,7 +95,8 @@
// create burst
std::shared_ptr<::android::nn::ExecutionBurstController> burst =
- ::android::nn::ExecutionBurstController::create(preparedModel, /*blocking=*/true);
+ android::nn::ExecutionBurstController::create(preparedModel,
+ std::chrono::microseconds{0});
ASSERT_NE(nullptr, burst.get());
// create memory keys
@@ -104,13 +106,12 @@
}
// execute and verify
- ErrorStatus error;
- std::vector<OutputShape> outputShapes;
- Timing timing;
- std::tie(error, outputShapes, timing) = burst->compute(request, measure, keys);
- EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
+ const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
+ const ErrorStatus status = nn::convertResultCodeToErrorStatus(n);
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
EXPECT_EQ(outputShapes.size(), 0);
EXPECT_TRUE(badTiming(timing));
+ EXPECT_FALSE(fallback);
// additional burst testing
if (request.pools.size() > 0) {
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index aa4f1f2..4fbd0e2 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -17,13 +17,15 @@
#define LOG_TAG "neuralnetworks_hidl_hal_test"
#include "VtsHalNeuralnetworks.h"
+#include <android-base/logging.h>
+#include <hidl/ServiceManagement.h>
+#include <string>
+#include <utility>
#include "1.0/Callbacks.h"
#include "1.0/Utils.h"
#include "GeneratedTestHarness.h"
#include "TestHarness.h"
-#include <android-base/logging.h>
-
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
using implementation::PreparedModelCallback;
@@ -82,34 +84,39 @@
ASSERT_NE(nullptr, preparedModel->get());
}
-// A class for test environment setup
-NeuralnetworksHidlEnvironment* NeuralnetworksHidlEnvironment::getInstance() {
- // This has to return a "new" object because it is freed inside
- // testing::AddGlobalTestEnvironment when the gtest is being torn down
- static NeuralnetworksHidlEnvironment* instance = new NeuralnetworksHidlEnvironment();
- return instance;
-}
-
-void NeuralnetworksHidlEnvironment::registerTestServices() {
- registerTestService<IDevice>();
-}
-
-// The main test class for NEURALNETWORK HIDL HAL.
void NeuralnetworksHidlTest::SetUp() {
- testing::VtsHalHidlTargetTestBase::SetUp();
-
-#ifdef PRESUBMIT_NOT_VTS
- const std::string name =
- NeuralnetworksHidlEnvironment::getInstance()->getServiceName<IDevice>();
- const std::string sampleDriver = "sample-";
- if (kDevice == nullptr && name.substr(0, sampleDriver.size()) == sampleDriver) {
- GTEST_SKIP();
- }
-#endif // PRESUBMIT_NOT_VTS
-
- ASSERT_NE(nullptr, kDevice.get());
+ testing::TestWithParam<NeuralnetworksHidlTestParam>::SetUp();
+ ASSERT_NE(kDevice, nullptr);
}
+static NamedDevice makeNamedDevice(const std::string& name) {
+ return {name, IDevice::getService(name)};
+}
+
+static std::vector<NamedDevice> getNamedDevicesImpl() {
+ // Retrieves the names of all service instances that implement IDevice,
+ // including any lazy HAL instances.
+ const std::vector<std::string> names = hardware::getAllHalInstanceNames(IDevice::descriptor);
+
+ // Get a handle to each device and pair it with its name.
+ std::vector<NamedDevice> namedDevices;
+ namedDevices.reserve(names.size());
+ std::transform(names.begin(), names.end(), std::back_inserter(namedDevices), makeNamedDevice);
+ return namedDevices;
+}
+
+const std::vector<NamedDevice>& getNamedDevices() {
+ const static std::vector<NamedDevice> devices = getNamedDevicesImpl();
+ return devices;
+}
+
+std::string printNeuralnetworksHidlTest(
+ const testing::TestParamInfo<NeuralnetworksHidlTestParam>& info) {
+ return gtestCompliantName(getName(info.param));
+}
+
+INSTANTIATE_DEVICE_TEST(NeuralnetworksHidlTest);
+
// Forward declaration from ValidateModel.cpp
void validateModel(const sp<IDevice>& device, const Model& model);
// Forward declaration from ValidateRequest.cpp
@@ -162,14 +169,3 @@
}
} // namespace android::hardware::neuralnetworks::V1_2::vts::functional
-
-using android::hardware::neuralnetworks::V1_2::vts::functional::NeuralnetworksHidlEnvironment;
-
-int main(int argc, char** argv) {
- testing::AddGlobalTestEnvironment(NeuralnetworksHidlEnvironment::getInstance());
- testing::InitGoogleTest(&argc, argv);
- NeuralnetworksHidlEnvironment::getInstance()->init(&argc, argv);
-
- int status = RUN_ALL_TESTS();
- return status;
-}
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
index 9981696..d01336e 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.h
@@ -17,42 +17,33 @@
#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
#define ANDROID_HARDWARE_NEURALNETWORKS_V1_2_VTS_HAL_NEURALNETWORKS_H
-#include <VtsHalHidlTargetTestBase.h>
-#include <VtsHalHidlTargetTestEnvBase.h>
-#include <android-base/macros.h>
-#include <android/hardware/neuralnetworks/1.0/types.h>
-#include <android/hardware/neuralnetworks/1.1/types.h>
#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
#include <android/hardware/neuralnetworks/1.2/types.h>
#include <gtest/gtest.h>
-
+#include "1.0/Utils.h"
#include "1.2/Callbacks.h"
namespace android::hardware::neuralnetworks::V1_2::vts::functional {
-// A class for test environment setup
-class NeuralnetworksHidlEnvironment : public testing::VtsHalHidlTargetTestEnvBase {
- DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlEnvironment);
- NeuralnetworksHidlEnvironment() = default;
+using NamedDevice = Named<sp<IDevice>>;
+using NeuralnetworksHidlTestParam = NamedDevice;
- public:
- static NeuralnetworksHidlEnvironment* getInstance();
- void registerTestServices() override;
-};
-
-// The main test class for NEURALNETWORKS HIDL HAL.
-class NeuralnetworksHidlTest : public testing::VtsHalHidlTargetTestBase {
- DISALLOW_COPY_AND_ASSIGN(NeuralnetworksHidlTest);
-
- public:
- NeuralnetworksHidlTest() = default;
- void SetUp() override;
-
+class NeuralnetworksHidlTest : public testing::TestWithParam<NeuralnetworksHidlTestParam> {
protected:
- const sp<IDevice> kDevice = testing::VtsHalHidlTargetTestBase::getService<IDevice>(
- NeuralnetworksHidlEnvironment::getInstance());
+ void SetUp() override;
+ const sp<IDevice> kDevice = getData(GetParam());
};
+const std::vector<NamedDevice>& getNamedDevices();
+
+std::string printNeuralnetworksHidlTest(
+ const testing::TestParamInfo<NeuralnetworksHidlTestParam>& info);
+
+#define INSTANTIATE_DEVICE_TEST(TestSuite) \
+ INSTANTIATE_TEST_SUITE_P(PerInstance, TestSuite, testing::ValuesIn(getNamedDevices()), \
+ printNeuralnetworksHidlTest)
+
// Create an IPreparedModel object. If the model cannot be prepared,
// "preparedModel" will be nullptr instead.
void createPreparedModel(const sp<IDevice>& device, const Model& model,
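Any fixture derived from the new NeuralnetworksHidlTest can be instantiated once per registered IDevice instance with INSTANTIATE_DEVICE_TEST, mirroring the INSTANTIATE_DEVICE_TEST(NeuralnetworksHidlTest) call in VtsHalNeuralnetworks.cpp above. A hedged sketch of a hypothetical additional suite:

#include <gtest/gtest.h>
#include "VtsHalNeuralnetworks.h"

namespace android::hardware::neuralnetworks::V1_2::vts::functional {

class MyDeviceTest : public NeuralnetworksHidlTest {};  // hypothetical suite

TEST_P(MyDeviceTest, DeviceIsResponsive) {
    // kDevice is the IDevice instance selected by the test parameter.
    EXPECT_TRUE(kDevice->getStatus().isOk());
}

INSTANTIATE_DEVICE_TEST(MyDeviceTest);

}  // namespace android::hardware::neuralnetworks::V1_2::vts::functional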
diff --git a/neuralnetworks/1.3/Android.bp b/neuralnetworks/1.3/Android.bp
new file mode 100644
index 0000000..0615ec6
--- /dev/null
+++ b/neuralnetworks/1.3/Android.bp
@@ -0,0 +1,21 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.neuralnetworks@1.3",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "types.hal",
+ "IDevice.hal",
+ ],
+ interfaces: [
+ "android.hardware.neuralnetworks@1.0",
+ "android.hardware.neuralnetworks@1.1",
+ "android.hardware.neuralnetworks@1.2",
+ "android.hidl.base@1.0",
+ "android.hidl.safe_union@1.0",
+ ],
+ gen_java: false,
+}
diff --git a/neuralnetworks/1.3/IDevice.hal b/neuralnetworks/1.3/IDevice.hal
new file mode 100644
index 0000000..ee36fb4
--- /dev/null
+++ b/neuralnetworks/1.3/IDevice.hal
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.3;
+
+import @1.0::ErrorStatus;
+import @1.1::ExecutionPreference;
+import @1.2::Constant;
+import @1.2::DeviceType;
+import @1.2::Extension;
+import @1.2::IDevice;
+import @1.2::IPreparedModelCallback;
+
+/**
+ * This interface represents a device driver.
+ */
+interface IDevice extends @1.2::IDevice {
+ /**
+ * Gets the capabilities of a driver.
+ *
+ * @return status Error status of the call, must be:
+ * - NONE if successful
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * @return capabilities Capabilities of the driver.
+ */
+ getCapabilities_1_3() generates (ErrorStatus status, Capabilities capabilities);
+
+ /**
+ * Gets the supported operations in a model.
+ *
+ * getSupportedOperations indicates which operations of a model are fully
+ * supported by the vendor driver. If an operation may not be supported for
+ * any reason, getSupportedOperations must return false for that operation.
+ *
+ * @param model A model whose operations--and their corresponding operands--
+ * are to be verified by the driver.
+ * @return status Error status of the call, must be:
+ * - NONE if successful
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - INVALID_ARGUMENT if provided model is invalid
+ * @return supportedOperations A list of supported operations, where true
+ * indicates the operation is supported and false indicates the
+ * operation is not supported. The index of "supported" corresponds with
+ * the index of the operation it is describing.
+ */
+ getSupportedOperations_1_3(Model model)
+ generates (ErrorStatus status, vec<bool> supportedOperations);
+
+ /**
+ * Asynchronously creates a prepared model for execution and optionally
+ * saves it into cache files.
+ *
+ * prepareModel is used to make any necessary transformations to or
+ * alternative representations to a model for execution, possibly including
+ * transformations on the constant data, optimization on the model's graph,
+ * or compilation into the device's native binary format. The model itself
+ * is not changed.
+ *
+ * Optionally, caching information may be provided for the driver to save
+ * the prepared model to cache files for faster model compilation time when
+ * the same model preparation is requested in the future. There are two
+ * types of cache file handles provided to the driver: model cache and data
+ * cache. For more information on the two types of cache handles, refer to
+ * getNumberOfCacheFilesNeeded.
+ *
+ * The file descriptors must be opened with read and write permission. A
+ * file may have any size, and the corresponding file descriptor may have
+ * any offset. The driver must truncate a file to zero size before writing
+ * to that file. The file descriptors may be closed by the client once the
+ * asynchronous preparation has finished. The driver must dup a file
+ * descriptor if it wants to get access to the cache file later.
+ *
+ * The model is prepared asynchronously with respect to the caller. The
+ * prepareModel function must verify the inputs to the preparedModel
+ * function related to preparing the model (as opposed to saving the
+ * prepared model to cache) are correct. If there is an error, prepareModel
+ * must immediately invoke the callback with the appropriate ErrorStatus
+ * value and nullptr for the IPreparedModel, then return with the same
+ * ErrorStatus. If the inputs to the prepareModel function that are related
+ * to preparing the model are valid and there is no error, prepareModel must
+ * launch an asynchronous task to prepare the model in the background, and
+ * immediately return from prepareModel with ErrorStatus::NONE. If the
+ * asynchronous task fails to launch, prepareModel must immediately invoke
+ * the callback with ErrorStatus::GENERAL_FAILURE and nullptr for the
+ * IPreparedModel, then return with ErrorStatus::GENERAL_FAILURE.
+ *
+ * When the asynchronous task has finished preparing the model, it must
+ * immediately invoke the callback function provided as an input to
+ * prepareModel. If the model was prepared successfully, the callback object
+ * must be invoked with an error status of ErrorStatus::NONE and the
+ * produced IPreparedModel object. If an error occurred preparing the model,
+ * the callback object must be invoked with the appropriate ErrorStatus
+ * value and nullptr for the IPreparedModel.
+ *
+ * Optionally, the driver may save the prepared model to cache during the
+ * asynchronous preparation. Any error that occurs when saving to cache must
+ * not affect the status of preparing the model. Even if the input arguments
+ * related to the cache are invalid, or the driver fails to save to cache,
+ * the prepareModel function must finish preparing the model. The
+ * driver may choose not to save to cache even if the caching information is
+ * provided and valid.
+ *
+ * The only information that may be unknown to the model at this stage is
+ * the shape of the tensors, which may only be known at execution time. As
+ * such, some driver services may return partially prepared models, where
+ * the prepared model may only be finished when it is paired with a set of
+ * inputs to the model. Note that the same prepared model object may be used
+ * with different shapes of inputs on different (possibly concurrent)
+ * executions.
+ *
+ * Multiple threads may call prepareModel on the same model concurrently.
+ *
+ * @param model The model to be prepared for execution.
+ * @param preference Indicates the intended execution behavior of a prepared
+ * model.
+ * @param modelCache A vector of handles with each entry holding exactly one
+ * cache file descriptor for the security-sensitive cache. The length of
+ * the vector must either be 0 indicating that caching information is
+ * not provided, or match the numModelCache returned from
+ * getNumberOfCacheFilesNeeded. The cache handles will be provided in
+ * the same order when retrieving the preparedModel from cache files
+ * with prepareModelFromCache.
+ * @param dataCache A vector of handles with each entry holding exactly one
+ * cache file descriptor for the constants' cache. The length of the
+ * vector must either be 0 indicating that caching information is not
+ * provided, or match the numDataCache returned from
+ * getNumberOfCacheFilesNeeded. The cache handles will be provided in
+ * the same order when retrieving the preparedModel from cache files
+ * with prepareModelFromCache.
+ * @param token A caching token of length Constant::BYTE_SIZE_OF_CACHE_TOKEN
+ * identifying the prepared model. The same token will be provided when
+ * retrieving the prepared model from the cache files with
+ * prepareModelFromCache. Tokens should be chosen to have a low rate of
+ * collision for a particular application. The driver cannot detect a
+ * collision; a collision will result in a failed execution or in a
+ * successful execution that produces incorrect output values. If both
+ * modelCache and dataCache are empty indicating that caching
+ * information is not provided, this token must be ignored.
+ * @param callback A callback object used to return the error status of
+ * preparing the model for execution and the prepared model if
+ * successful, nullptr otherwise. The callback object's notify function
+ * must be called exactly once, even if the model could not be prepared.
+ * @return status Error status of launching a task which prepares the model
+ * in the background; must be:
+ * - NONE if preparation task is successfully launched
+ * - DEVICE_UNAVAILABLE if driver is offline or busy
+ * - GENERAL_FAILURE if there is an unspecified error
+ * - INVALID_ARGUMENT if one of the input arguments related to preparing
+ * the model is invalid
+ */
+ prepareModel_1_3(Model model, ExecutionPreference preference,
+ vec<handle> modelCache, vec<handle> dataCache,
+ uint8_t[Constant:BYTE_SIZE_OF_CACHE_TOKEN] token,
+ IPreparedModelCallback callback)
+ generates (ErrorStatus status);
+};
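Like the other HIDL methods in this family, the generated C++ proxy turns each generates(...) clause into a callback parameter, matching the pattern the 1.2 VTS code above uses for getCapabilities_1_2. A hedged client-side sketch for the new method; the helper name and its all-operations-supported policy are illustrative.

#include <algorithm>

#include <android/hardware/neuralnetworks/1.3/IDevice.h>

using ::android::sp;
using ::android::hardware::hidl_vec;
using ::android::hardware::Return;
using ::android::hardware::neuralnetworks::V1_0::ErrorStatus;
using ::android::hardware::neuralnetworks::V1_3::IDevice;
using ::android::hardware::neuralnetworks::V1_3::Model;

// Returns true only if the driver reports every operation in 'model' as supported.
bool allOperationsSupported(const sp<IDevice>& device, const Model& model) {
    bool allSupported = false;
    Return<void> ret = device->getSupportedOperations_1_3(
            model, [&](ErrorStatus status, const hidl_vec<bool>& supported) {
                allSupported = status == ErrorStatus::NONE &&
                               std::all_of(supported.begin(), supported.end(),
                                           [](bool supportedOp) { return supportedOp; });
            });
    return ret.isOk() && allSupported;
}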
diff --git a/neuralnetworks/1.3/types.hal b/neuralnetworks/1.3/types.hal
new file mode 100644
index 0000000..86ab287
--- /dev/null
+++ b/neuralnetworks/1.3/types.hal
@@ -0,0 +1,373 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.3;
+
+import @1.0::DataLocation;
+import @1.0::OperandLifeTime;
+import @1.0::PerformanceInfo;
+import @1.2::OperandType;
+import @1.2::OperationType;
+import @1.2::SymmPerChannelQuantParams;
+
+import android.hidl.safe_union@1.0::Monostate;
+
+enum OperandType : @1.2::OperandType {
+ /**
+ * A tensor of 8 bit signed integers that represent real numbers.
+ *
+ * Attached to this tensor are two numbers that can be used to convert the
+ * 8 bit integer to the real value and vice versa. These two numbers are:
+ * - scale: a 32 bit floating point value greater than zero.
+ * - zeroPoint: a 32 bit integer, in range [-128, 127].
+ *
+ * The formula is:
+ * real_value = (integer_value - zeroPoint) * scale.
+ */
+ TENSOR_QUANT8_ASYMM_SIGNED = 14,
+
+ /*
+ * DEPRECATED. Since HAL version 1.2, extensions are the preferred
+ * alternative to OEM operation and data types.
+ *
+ * OEM specific scalar value.
+ * OEM = 10000,
+ */
+ /*
+ * DEPRECATED. Since HAL version 1.2, extensions are the preferred
+ * alternative to OEM operation and data types.
+ *
+ * A tensor of OEM specific values.
+ * TENSOR_OEM_BYTE = 10001,
+ */
+ /* ADDING A NEW FUNDAMENTAL TYPE REQUIRES UPDATING THE VALUE OF
+ * OperandTypeRange::FUNDAMENTAL_MAX.
+ */
+ /* ADDING A NEW OEM TYPE REQUIRES UPDATING THE VALUE OF
+ * OperandTypeRange::OEM_MAX.
+ */
+};
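The documented conversion for TENSOR_QUANT8_ASYMM_SIGNED translates directly into code. Below is a standalone sketch of dequantization as specified, plus the usual inverse with clamping to the signed 8-bit range; the quantize direction is a common companion stated here as an assumption rather than something this file defines.

#include <algorithm>
#include <cmath>
#include <cstdint>

// real_value = (integer_value - zeroPoint) * scale, as documented above.
float dequantizeSigned(int8_t value, float scale, int32_t zeroPoint) {
    return static_cast<float>(static_cast<int32_t>(value) - zeroPoint) * scale;
}

// Assumed inverse: round to nearest, add the zero point, clamp to [-128, 127].
int8_t quantizeSigned(float real, float scale, int32_t zeroPoint) {
    const int32_t quantized = static_cast<int32_t>(std::round(real / scale)) + zeroPoint;
    return static_cast<int8_t>(std::min(127, std::max(-128, quantized)));
}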
+
+/**
+ * The range of operand values in the OperandType enum.
+ */
+enum OperandTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+ FUNDAMENTAL_MAX = 14,
+ OEM_MIN = 10000,
+ OEM_MAX = 10001,
+ BASE_MAX = 0xFFFF,
+};
+
+/**
+ * The capabilities of a driver.
+ *
+ * Performance of an operation comes from the type of its first operand.
+ * This represents performance for non-extension operand types.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data but performing
+ * calculations with range and/or precision as low as that of the IEEE
+ * 754 16-bit floating-point format.
+ */
+ PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
+ PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;
+
+ /**
+ * Driver performance when operating on a particular data type.
+ * In the case of float32 data, this is used when the calculations
+ * are not relaxed.
+ */
+ struct OperandPerformance {
+ OperandType type;
+ PerformanceInfo info;
+ };
+
+ /**
+ * Performance by operand type. Must be sorted by OperandType.
+ * If a particular OperandType is not present in operandPerformance,
+ * its performance is treated as
+ * { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
+ */
+ vec<OperandPerformance> operandPerformance;
+};
+
+/**
+ * Describes one operand of the model's graph.
+ */
+struct Operand {
+ /**
+ * The data type.
+ *
+ * Besides the values listed in {@link OperandType}, any value above
+ * {@link OperandTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperandType type;
+
+ /**
+ * Dimensions of the operand.
+ *
+ * For a scalar operand, dimensions.size() must be 0.
+ *
+ * A tensor operand with all dimensions specified has "fully
+ * specified" dimensions. Whenever possible (i.e., whenever the
+ * dimensions are known at model construction time), a tensor
+ * operand should have (but is not required to have) fully
+ * specified dimensions, in order to enable the best possible
+ * performance.
+ *
+ * If a tensor operand's dimensions are not fully specified, the
+ * dimensions of the operand are deduced from the operand
+ * dimensions and values of the operation for which that operand
+ * is an output.
+ *
+ * In the following situations, a tensor operand's dimensions must
+ * be fully specified:
+ *
+ * . The operand has lifetime CONSTANT_COPY or
+ * CONSTANT_REFERENCE.
+ *
+ * . The operand has lifetime MODEL_INPUT. Fully
+ * specified dimensions must either be present in the
+ * Operand or they must be provided in the corresponding
+ * RequestArgument.
+ * EXCEPTION: If the input is optional and omitted
+ * (by setting the hasNoValue field of the corresponding
+ * RequestArgument to true) then it need not have fully
+ * specified dimensions.
+ *
+ * A tensor operand with some number of unspecified dimensions is
+ * represented by setting each unspecified dimension to 0.
+ *
+ * A tensor operand with unspecified rank is represented by providing
+ * an empty dimensions vector.
+ */
+ vec<uint32_t> dimensions;
+
+ /**
+ * The number of times this operand appears as an operation input.
+ *
+ * (For example, if this operand appears once in one operation's
+ * input list, and three times in another operation's input list,
+ * then numberOfConsumers = 4.)
+ */
+ uint32_t numberOfConsumers;
+
+ /**
+ * Quantized scale of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
+ * TENSOR_INT32.
+ */
+ float scale;
+
+ /**
+ * Quantized zero-point offset of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ */
+ int32_t zeroPoint;
+
+ /**
+ * How the operand is used.
+ */
+ OperandLifeTime lifetime;
+
+ /**
+ * Where to find the data for this operand.
+ * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
+ * NO_VALUE:
+ * - All the fields must be 0.
+ * If the lifetime is CONSTANT_COPY:
+ * - location.poolIndex is 0.
+ * - location.offset is the offset in bytes into Model.operandValues.
+ * - location.length is set.
+ * If the lifetime is CONSTANT_REFERENCE:
+ * - location.poolIndex is set.
+ * - location.offset is the offset in bytes into the specified pool.
+ * - location.length is set.
+ */
+ DataLocation location;
+
+ /**
+ * Additional parameters specific to a particular operand type.
+ */
+ safe_union ExtraParams {
+ /**
+ * No additional parameters.
+ */
+ Monostate none;
+
+ /**
+ * Symmetric per-channel quantization parameters.
+ *
+ * Only applicable to operands of type TENSOR_QUANT8_SYMM_PER_CHANNEL.
+ */
+ SymmPerChannelQuantParams channelQuant;
+
+ /**
+ * Extension operand parameters.
+ *
+ * The framework treats this as an opaque data blob.
+ * The format is up to individual extensions.
+ */
+ vec<uint8_t> extension;
+ } extraParams;
+};
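The location rules above can be read as a small per-lifetime consistency check. The sketch below mirrors them with plain C++ stand-ins for the HIDL types; it is an illustration of the spec, not the validation code the framework actually uses:

#include <cstdint>

// Illustrative stand-ins mirroring the fields documented above.
struct DataLocation { uint32_t poolIndex; uint32_t offset; uint32_t length; };
enum class OperandLifeTime {
    TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, CONSTANT_COPY, CONSTANT_REFERENCE, NO_VALUE
};

// Returns true if 'location' is consistent with 'lifetime' as described above.
// operandValuesSize is the size of Model.operandValues in bytes.
bool locationMatchesLifetime(OperandLifeTime lifetime, const DataLocation& location,
                             uint64_t operandValuesSize) {
    switch (lifetime) {
        case OperandLifeTime::TEMPORARY_VARIABLE:
        case OperandLifeTime::MODEL_INPUT:
        case OperandLifeTime::MODEL_OUTPUT:
        case OperandLifeTime::NO_VALUE:
            return location.poolIndex == 0 && location.offset == 0 && location.length == 0;
        case OperandLifeTime::CONSTANT_COPY:
            return location.poolIndex == 0 && location.length > 0 &&
                   static_cast<uint64_t>(location.offset) + location.length <= operandValuesSize;
        case OperandLifeTime::CONSTANT_REFERENCE:
            // poolIndex and offset are validated against the referenced pool itself.
            return location.length > 0;
    }
    return false;
}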
+
+/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ */
+ OperationType type;
+
+ /**
+     * Describes the table that contains the indexes of the inputs of the
+     * operation. Each value is the index of an operand in the model's
+     * "operands" vector.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+     * Describes the table that contains the indexes of the outputs of the
+     * operation. Each value is the index of an operand in the model's
+     * "operands" vector.
+ */
+ vec<uint32_t> outputs;
+};
+
+/**
+ * A Neural Network Model.
+ *
+ * This includes not only the execution graph, but also constant data such as
+ * weights or scalars added at construction time. The only information that
+ * may not be known is the shape of the input tensors.
+ */
+struct Model {
+ /**
+ * All operands included in the model.
+ */
+ vec<Operand> operands;
+
+ /**
+ * All operations included in the model.
+ *
+ * The operations are sorted into execution order. Every operand
+ * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
+ * written before it is read.
+ */
+ vec<Operation> operations;
+
+ /**
+ * Input indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> inputIndexes;
+
+ /**
+ * Output indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> outputIndexes;
+
+ /**
+ * A byte buffer containing operand data that were copied into the model.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_COPY.
+ */
+ vec<uint8_t> operandValues;
+
+ /**
+ * A collection of shared memory pools containing operand values.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_REFERENCE.
+ */
+ vec<memory> pools;
+
+ /**
+ * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or
+ * precision as low as that of the IEEE 754 16-bit floating-point format.
+ * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the
+ * range and precision of the IEEE 754 32-bit floating-point format.
+ */
+ bool relaxComputationFloat32toFloat16;
+
+ /**
+ * The mapping between extension names and prefixes of operand and
+ * operation type values.
+ *
+ * An operand or operation whose numeric type value is above
+ * {@link OperandTypeRange::BASE_MAX} or
+ * {@link OperationTypeRange::BASE_MAX} respectively should be interpreted
+     * as an extension operand or operation. The low
+ * {@link Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the value
+ * correspond to the type ID within the extension and the high
+ * {@link Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode
+ * the "prefix", which maps uniquely to the extension name.
+ *
+ * For example, if a model contains an operation whose value is
+ * 0xAAAABBBB and extensionNameToPrefix contains an entry with
+ * prefix=0xAAAA and name="vendor.test.test_extension", then
+ * the operation should be interpreted as the operation 0xBBBB
+ * of the extension named vendor.test.test_extension.
+ *
+ * This is a one-to-one correspondence. That is, there must be at most one
+ * prefix corresponding to each extension name and at most one extension
+ * name corresponding to each prefix.
+ */
+ vec<ExtensionNameAndPrefix> extensionNameToPrefix;
+
+ /**
+ * A correspondence between an extension name and a prefix of operand and
+ * operation type values.
+ */
+ struct ExtensionNameAndPrefix {
+ /**
+ * The extension name.
+ *
+ * See {@link Extension::name} for the format specification.
+ */
+ string name;
+
+ /**
+ * The unique extension identifier within the model.
+ *
+ * See {@link Model::extensionNameToPrefix}.
+ */
+ uint16_t prefix;
+ };
+
+ /**
+ * Numeric values of extension operand and operation types have the
+ * following structure:
+ * - 16 high bits represent the "prefix", which corresponds uniquely to the
+ * extension name.
+ * - 16 low bits represent the type ID within the extension.
+ */
+ enum ExtensionTypeEncoding : uint8_t {
+ HIGH_BITS_PREFIX = 16,
+ LOW_BITS_TYPE = 16,
+ };
+};
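The prefix scheme described by extensionNameToPrefix and ExtensionTypeEncoding is a plain 16/16-bit split of the 32-bit type value. A small standalone sketch of the encode/decode arithmetic, reusing the 0xAAAABBBB example from the comment above (not generated code):

#include <cstdint>
#include <utility>

constexpr uint32_t kLowBitsType = 16;  // ExtensionTypeEncoding::LOW_BITS_TYPE

// Combines an extension prefix with a type ID defined within that extension.
constexpr uint32_t encodeExtensionType(uint16_t prefix, uint16_t typeWithinExtension) {
    return (static_cast<uint32_t>(prefix) << kLowBitsType) | typeWithinExtension;
}

// Splits a combined value back into (prefix, type ID within the extension).
constexpr std::pair<uint16_t, uint16_t> decodeExtensionType(uint32_t value) {
    return {static_cast<uint16_t>(value >> kLowBitsType), static_cast<uint16_t>(value & 0xFFFF)};
}

static_assert(encodeExtensionType(0xAAAA, 0xBBBB) == 0xAAAABBBBu, "matches the example above");
static_assert(decodeExtensionType(0xAAAABBBB).first == 0xAAAA, "prefix -> extension name lookup");
static_assert(decodeExtensionType(0xAAAABBBB).second == 0xBBBB, "type ID within the extension");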
diff --git a/neuralnetworks/1.3/types.t b/neuralnetworks/1.3/types.t
new file mode 100644
index 0000000..d41cfd2
--- /dev/null
+++ b/neuralnetworks/1.3/types.t
@@ -0,0 +1,344 @@
+%% template file for generating types.hal.
+%% see frameworks/ml/nn/tools/api/README.md.
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.neuralnetworks@1.3;
+
+import @1.0::DataLocation;
+import @1.0::OperandLifeTime;
+import @1.0::PerformanceInfo;
+import @1.2::OperandType;
+import @1.2::OperationType;
+import @1.2::SymmPerChannelQuantParams;
+
+import android.hidl.safe_union@1.0::Monostate;
+
+enum OperandType : @1.2::OperandType {
+%insert Operand_1.3
+%insert OEMDeprecationAndOperandTypeRangeMaxComment
+};
+
+/**
+ * The range of operand values in the OperandType enum.
+ */
+enum OperandTypeRange : uint32_t {
+ BASE_MIN = 0,
+ FUNDAMENTAL_MIN = 0,
+%insert Operand_1.3_MAX
+ OEM_MIN = 10000,
+ OEM_MAX = 10001,
+ BASE_MAX = 0xFFFF,
+};
+
+
+/**
+ * The capabilities of a driver.
+ *
+ * Performance of an operation comes from the type of its first operand.
+ * This represents performance for non-extension operand types.
+ */
+struct Capabilities {
+ /**
+ * Driver performance when operating on float32 data but performing
+ * calculations with range and/or precision as low as that of the IEEE
+ * 754 16-bit floating-point format.
+ */
+ PerformanceInfo relaxedFloat32toFloat16PerformanceScalar;
+ PerformanceInfo relaxedFloat32toFloat16PerformanceTensor;
+
+ /**
+ * Driver performance when operating on a particular data type.
+ * In the case of float32 data, this is used when the calculations
+ * are not relaxed.
+ */
+ struct OperandPerformance {
+ OperandType type;
+ PerformanceInfo info;
+ };
+
+ /**
+ * Performance by operand type. Must be sorted by OperandType.
+ * If a particular OperandType is not present in operandPerformance,
+ * its performance is treated as
+ * { .execTime = FLT_MAX, .powerUsage = FLT_MAX }.
+ */
+ vec<OperandPerformance> operandPerformance;
+};
+
+/**
+ * Describes one operand of the model's graph.
+ */
+struct Operand {
+ /**
+ * The data type.
+ *
+ * Besides the values listed in {@link OperandType}, any value above
+ * {@link OperandTypeRange::BASE_MAX} is possible and should be interpreted
+ * as an extension type according to {@link Model::extensionNameToPrefix}.
+ */
+ OperandType type;
+
+ /**
+ * Dimensions of the operand.
+ *
+ * For a scalar operand, dimensions.size() must be 0.
+ *
+ * A tensor operand with all dimensions specified has "fully
+ * specified" dimensions. Whenever possible (i.e., whenever the
+ * dimensions are known at model construction time), a tensor
+ * operand should have (but is not required to have) fully
+ * specified dimensions, in order to enable the best possible
+ * performance.
+ *
+ * If a tensor operand's dimensions are not fully specified, the
+ * dimensions of the operand are deduced from the operand
+ * dimensions and values of the operation for which that operand
+ * is an output.
+ *
+ * In the following situations, a tensor operand's dimensions must
+ * be fully specified:
+ *
+ * . The operand has lifetime CONSTANT_COPY or
+ * CONSTANT_REFERENCE.
+ *
+ * . The operand has lifetime MODEL_INPUT. Fully
+ * specified dimensions must either be present in the
+ * Operand or they must be provided in the corresponding
+ * RequestArgument.
+ * EXCEPTION: If the input is optional and omitted
+ * (by setting the hasNoValue field of the corresponding
+ * RequestArgument to true) then it need not have fully
+ * specified dimensions.
+ *
+ * A tensor operand with some number of unspecified dimensions is
+ * represented by setting each unspecified dimension to 0.
+ *
+ * A tensor operand with unspecified rank is represented by providing
+ * an empty dimensions vector.
+ */
+ vec<uint32_t> dimensions;
+
+ /**
+ * The number of times this operand appears as an operation input.
+ *
+ * (For example, if this operand appears once in one operation's
+ * input list, and three times in another operation's input list,
+ * then numberOfConsumers = 4.)
+ */
+ uint32_t numberOfConsumers;
+
+ /**
+ * Quantized scale of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM or
+ * TENSOR_INT32.
+ */
+ float scale;
+
+ /**
+ * Quantized zero-point offset of the operand.
+ *
+ * Only applicable if the operand is of type TENSOR_QUANT8_ASYMM.
+ */
+ int32_t zeroPoint;
+
+ /**
+ * How the operand is used.
+ */
+ OperandLifeTime lifetime;
+
+ /**
+ * Where to find the data for this operand.
+ * If the lifetime is TEMPORARY_VARIABLE, MODEL_INPUT, MODEL_OUTPUT, or
+ * NO_VALUE:
+ * - All the fields must be 0.
+ * If the lifetime is CONSTANT_COPY:
+ * - location.poolIndex is 0.
+ * - location.offset is the offset in bytes into Model.operandValues.
+ * - location.length is set.
+ * If the lifetime is CONSTANT_REFERENCE:
+ * - location.poolIndex is set.
+ * - location.offset is the offset in bytes into the specified pool.
+ * - location.length is set.
+ */
+ DataLocation location;
+
+ /**
+ * Additional parameters specific to a particular operand type.
+ */
+ safe_union ExtraParams {
+ /**
+ * No additional parameters.
+ */
+ Monostate none;
+
+ /**
+ * Symmetric per-channel quantization parameters.
+ *
+ * Only applicable to operands of type TENSOR_QUANT8_SYMM_PER_CHANNEL.
+ */
+ SymmPerChannelQuantParams channelQuant;
+
+ /**
+ * Extension operand parameters.
+ *
+ * The framework treats this as an opaque data blob.
+ * The format is up to individual extensions.
+ */
+ vec<uint8_t> extension;
+ } extraParams;
+};
+
+/**
+ * Describes one operation of the model's graph.
+ */
+struct Operation {
+ /**
+ * The operation type.
+ */
+ OperationType type;
+
+ /**
+     * Describes the table that contains the indexes of the inputs of the
+     * operation. Each value is the index of an operand in the model's
+     * "operands" vector.
+ */
+ vec<uint32_t> inputs;
+
+ /**
+     * Describes the table that contains the indexes of the outputs of the
+     * operation. Each value is the index of an operand in the model's
+     * "operands" vector.
+ */
+ vec<uint32_t> outputs;
+};
+
+/**
+ * A Neural Network Model.
+ *
+ * This includes not only the execution graph, but also constant data such as
+ * weights or scalars added at construction time. The only information that
+ * may not be known is the shape of the input tensors.
+ */
+struct Model {
+ /**
+ * All operands included in the model.
+ */
+ vec<Operand> operands;
+
+ /**
+ * All operations included in the model.
+ *
+ * The operations are sorted into execution order. Every operand
+ * with lifetime MODEL_OUTPUT or TEMPORARY_VARIABLE must be
+ * written before it is read.
+ */
+ vec<Operation> operations;
+
+ /**
+ * Input indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> inputIndexes;
+
+ /**
+ * Output indexes of the model. There must be at least one.
+ *
+ * Each value corresponds to the index of the operand in "operands".
+ */
+ vec<uint32_t> outputIndexes;
+
+ /**
+ * A byte buffer containing operand data that were copied into the model.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_COPY.
+ */
+ vec<uint8_t> operandValues;
+
+ /**
+ * A collection of shared memory pools containing operand values.
+ *
+ * An operand's value must be located here if and only if Operand::lifetime
+ * equals OperandLifeTime::CONSTANT_REFERENCE.
+ */
+ vec<memory> pools;
+
+ /**
+ * 'true' indicates TENSOR_FLOAT32 may be calculated with range and/or
+ * precision as low as that of the IEEE 754 16-bit floating-point format.
+ * 'false' indicates TENSOR_FLOAT32 must be calculated using at least the
+ * range and precision of the IEEE 754 32-bit floating-point format.
+ */
+ bool relaxComputationFloat32toFloat16;
+
+ /**
+ * The mapping between extension names and prefixes of operand and
+ * operation type values.
+ *
+ * An operand or operation whose numeric type value is above
+ * {@link OperandTypeRange::BASE_MAX} or
+ * {@link OperationTypeRange::BASE_MAX} respectively should be interpreted
+     * as an extension operand or operation. The low
+ * {@link Model::ExtensionTypeEncoding::LOW_BITS_TYPE} bits of the value
+ * correspond to the type ID within the extension and the high
+ * {@link Model::ExtensionTypeEncoding::HIGH_BITS_PREFIX} bits encode
+ * the "prefix", which maps uniquely to the extension name.
+ *
+ * For example, if a model contains an operation whose value is
+ * 0xAAAABBBB and extensionNameToPrefix contains an entry with
+ * prefix=0xAAAA and name="vendor.test.test_extension", then
+ * the operation should be interpreted as the operation 0xBBBB
+ * of the extension named vendor.test.test_extension.
+ *
+ * This is a one-to-one correspondence. That is, there must be at most one
+ * prefix corresponding to each extension name and at most one extension
+ * name corresponding to each prefix.
+ */
+ vec<ExtensionNameAndPrefix> extensionNameToPrefix;
+
+ /**
+ * A correspondence between an extension name and a prefix of operand and
+ * operation type values.
+ */
+ struct ExtensionNameAndPrefix {
+ /**
+ * The extension name.
+ *
+ * See {@link Extension::name} for the format specification.
+ */
+ string name;
+
+ /**
+ * The unique extension identifier within the model.
+ *
+ * See {@link Model::extensionNameToPrefix}.
+ */
+ uint16_t prefix;
+ };
+
+ /**
+ * Numeric values of extension operand and operation types have the
+ * following structure:
+ * - 16 high bits represent the "prefix", which corresponds uniquely to the
+ * extension name.
+ * - 16 low bits represent the type ID within the extension.
+ */
+ enum ExtensionTypeEncoding : uint8_t {
+ HIGH_BITS_PREFIX = 16,
+ LOW_BITS_TYPE = 16,
+ };
+};
diff --git a/neuralnetworks/1.3/vts/OWNERS b/neuralnetworks/1.3/vts/OWNERS
new file mode 100644
index 0000000..b5a8e1f
--- /dev/null
+++ b/neuralnetworks/1.3/vts/OWNERS
@@ -0,0 +1,16 @@
+# Neuralnetworks team
+butlermichael@google.com
+dgross@google.com
+jeanluc@google.com
+levp@google.com
+miaowang@google.com
+mikie@google.com
+mks@google.com
+pszczepaniak@google.com
+slavash@google.com
+vddang@google.com
+xusongw@google.com
+
+# VTS team
+yim@google.com
+yuexima@google.com
diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp
new file mode 100644
index 0000000..90ce852
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/Android.bp
@@ -0,0 +1,58 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_test {
+ name: "VtsHalNeuralNetworksV1_3TargetTest",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: [
+ "BasicTests.cpp",
+ "CompilationCachingTests.cpp",
+ "GeneratedTestHarness.cpp",
+ "TestAssertions.cpp",
+ "ValidateBurst.cpp",
+ "ValidateModel.cpp",
+ "ValidateRequest.cpp",
+ "VtsHalNeuralnetworks.cpp",
+ ],
+ shared_libs: [
+ "libfmq",
+ "libnativewindow",
+ ],
+ static_libs: [
+ "android.hardware.neuralnetworks@1.0",
+ "android.hardware.neuralnetworks@1.1",
+ "android.hardware.neuralnetworks@1.2",
+ "android.hardware.neuralnetworks@1.3",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
+ "libgmock",
+ "libhidlmemory",
+ "libneuralnetworks_generated_test_harness",
+ "libneuralnetworks_utils",
+ "VtsHalNeuralNetworksV1_0_utils",
+ "VtsHalNeuralNetworksV1_2Callbacks",
+ ],
+ whole_static_libs: [
+ "neuralnetworks_generated_V1_0_example",
+ "neuralnetworks_generated_V1_1_example",
+ "neuralnetworks_generated_V1_2_example",
+ "neuralnetworks_generated_V1_3_example",
+ ],
+ header_libs: [
+ "libneuralnetworks_headers",
+ ],
+ test_suites: ["general-tests"],
+}
diff --git a/neuralnetworks/1.3/vts/functional/BasicTests.cpp b/neuralnetworks/1.3/vts/functional/BasicTests.cpp
new file mode 100644
index 0000000..b64dc2f
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/BasicTests.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using V1_0::DeviceStatus;
+using V1_0::ErrorStatus;
+using V1_0::PerformanceInfo;
+using V1_2::Constant;
+using V1_2::DeviceType;
+using V1_2::Extension;
+
+// create device test
+TEST_P(NeuralnetworksHidlTest, CreateDevice) {}
+
+// status test
+TEST_P(NeuralnetworksHidlTest, StatusTest) {
+ Return<DeviceStatus> status = kDevice->getStatus();
+ ASSERT_TRUE(status.isOk());
+ EXPECT_EQ(DeviceStatus::AVAILABLE, static_cast<DeviceStatus>(status));
+}
+
+// initialization
+TEST_P(NeuralnetworksHidlTest, GetCapabilitiesTest) {
+ using OperandPerformance = Capabilities::OperandPerformance;
+ Return<void> ret = kDevice->getCapabilities_1_3([](ErrorStatus status,
+ const Capabilities& capabilities) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+
+ auto isPositive = [](const PerformanceInfo& perf) {
+ return perf.execTime > 0.0f && perf.powerUsage > 0.0f;
+ };
+
+ EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceScalar));
+ EXPECT_TRUE(isPositive(capabilities.relaxedFloat32toFloat16PerformanceTensor));
+ const auto& opPerf = capabilities.operandPerformance;
+ EXPECT_TRUE(std::all_of(
+ opPerf.begin(), opPerf.end(),
+ [isPositive](const OperandPerformance& a) { return isPositive(a.info); }));
+ EXPECT_TRUE(std::is_sorted(opPerf.begin(), opPerf.end(),
+ [](const OperandPerformance& a, const OperandPerformance& b) {
+ return a.type < b.type;
+ }));
+ });
+ EXPECT_TRUE(ret.isOk());
+}
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
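GetCapabilitiesTest above requires every reported PerformanceInfo to be strictly positive and operandPerformance to be sorted by operand type. A driver-side sketch of producing a table that satisfies both checks (again with simplified stand-in types instead of the generated V1_3 headers):

#include <algorithm>
#include <vector>

// Simplified stand-ins for the HIDL types exercised by the test above.
enum class OperandType { FLOAT32, INT32, TENSOR_FLOAT32, TENSOR_INT32, TENSOR_QUANT8_ASYMM };
struct PerformanceInfo { float execTime; float powerUsage; };
struct OperandPerformance { OperandType type; PerformanceInfo info; };

std::vector<OperandPerformance> makeOperandPerformance() {
    // Strictly positive numbers so the all_of(isPositive) check passes.
    std::vector<OperandPerformance> perf = {
            {OperandType::TENSOR_FLOAT32, {0.8f, 0.8f}},
            {OperandType::FLOAT32, {1.0f, 1.0f}},
            {OperandType::TENSOR_QUANT8_ASYMM, {0.5f, 0.5f}},
    };
    // Sort by type so the std::is_sorted check passes.
    std::sort(perf.begin(), perf.end(),
              [](const OperandPerformance& a, const OperandPerformance& b) { return a.type < b.type; });
    return perf;
}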
diff --git a/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
new file mode 100644
index 0000000..0ac4738
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/CompilationCachingTests.cpp
@@ -0,0 +1,1377 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include <android-base/logging.h>
+#include <fcntl.h>
+#include <ftw.h>
+#include <gtest/gtest.h>
+#include <hidlmemory/mapping.h>
+#include <unistd.h>
+
+#include <cstdio>
+#include <cstdlib>
+#include <random>
+#include <thread>
+
+#include "1.2/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
+// Forward declaration of the mobilenet generated test models in
+// frameworks/ml/nn/runtime/test/generated/.
+namespace generated_tests::mobilenet_224_gender_basic_fixed {
+const test_helper::TestModel& get_test_model();
+} // namespace generated_tests::mobilenet_224_gender_basic_fixed
+
+namespace generated_tests::mobilenet_quantized {
+const test_helper::TestModel& get_test_model();
+} // namespace generated_tests::mobilenet_quantized
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using namespace test_helper;
+using V1_0::ErrorStatus;
+using V1_1::ExecutionPreference;
+using V1_2::Constant;
+using V1_2::IPreparedModel;
+using V1_2::OperationType;
+using V1_2::implementation::PreparedModelCallback;
+
+namespace float32_model {
+
+constexpr auto get_test_model = generated_tests::mobilenet_224_gender_basic_fixed::get_test_model;
+
+} // namespace float32_model
+
+namespace quant8_model {
+
+constexpr auto get_test_model = generated_tests::mobilenet_quantized::get_test_model;
+
+} // namespace quant8_model
+
+namespace {
+
+enum class AccessMode { READ_WRITE, READ_ONLY, WRITE_ONLY };
+
+// Creates cache handles based on provided file groups.
+// The outer vector corresponds to handles and the inner vector is for fds held by each handle.
+void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups,
+ const std::vector<AccessMode>& mode, hidl_vec<hidl_handle>* handles) {
+ handles->resize(fileGroups.size());
+ for (uint32_t i = 0; i < fileGroups.size(); i++) {
+ std::vector<int> fds;
+ for (const auto& file : fileGroups[i]) {
+ int fd;
+ if (mode[i] == AccessMode::READ_ONLY) {
+ fd = open(file.c_str(), O_RDONLY);
+ } else if (mode[i] == AccessMode::WRITE_ONLY) {
+ fd = open(file.c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
+ } else if (mode[i] == AccessMode::READ_WRITE) {
+ fd = open(file.c_str(), O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
+ } else {
+ FAIL();
+ }
+ ASSERT_GE(fd, 0);
+ fds.push_back(fd);
+ }
+ native_handle_t* cacheNativeHandle = native_handle_create(fds.size(), 0);
+ ASSERT_NE(cacheNativeHandle, nullptr);
+ std::copy(fds.begin(), fds.end(), &cacheNativeHandle->data[0]);
+ (*handles)[i].setTo(cacheNativeHandle, /*shouldOwn=*/true);
+ }
+}
+
+void createCacheHandles(const std::vector<std::vector<std::string>>& fileGroups, AccessMode mode,
+ hidl_vec<hidl_handle>* handles) {
+ createCacheHandles(fileGroups, std::vector<AccessMode>(fileGroups.size(), mode), handles);
+}
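A note on error handling in createCacheHandles: ASSERT_* and FAIL() inside a void helper only return from the helper, while the test is still marked as failed; callers that must not continue with incomplete handles can wrap the call in ASSERT_NO_FATAL_FAILURE. A self-contained gtest sketch of that behaviour (demo names only, not part of this change):

#include <gtest/gtest.h>

// A fatal assertion inside a void helper aborts only the helper, so the caller
// uses ASSERT_NO_FATAL_FAILURE when it must not run past a failed setup step.
static void helperThatMayFail(bool fail, int* out) {
    ASSERT_FALSE(fail);  // On failure, this returns from the helper only.
    *out = 42;
}

TEST(HelperFailureDemo, PropagatesFatalFailure) {
    int value = 0;
    ASSERT_NO_FATAL_FAILURE(helperThatMayFail(/*fail=*/false, &value));
    EXPECT_EQ(value, 42);
}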
+
+// Create a chain of broadcast operations. The second operand is always the constant tensor [1].
+// For simplicity, the activation scalar is shared. The second operand is not shared in the
+// model, to let the driver maintain a non-trivial amount of constant data and the corresponding
+// data locations in the cache.
+//
+// --------- activation --------
+// ↓ ↓ ↓ ↓
+// E.g. input -> ADD -> ADD -> ADD -> ... -> ADD -> output
+// ↑ ↑ ↑ ↑
+// [1] [1] [1] [1]
+//
+// This function assumes the operation is either ADD or MUL.
+template <typename CppType, TestOperandType operandType>
+TestModel createLargeTestModelImpl(TestOperationType op, uint32_t len) {
+ EXPECT_TRUE(op == TestOperationType::ADD || op == TestOperationType::MUL);
+
+ // Model operations and operands.
+ std::vector<TestOperation> operations(len);
+ std::vector<TestOperand> operands(len * 2 + 2);
+
+ // The activation scalar, value = 0.
+ operands[0] = {
+ .type = TestOperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = len,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = TestOperandLifeTime::CONSTANT_COPY,
+ .data = TestBuffer::createFromVector<int32_t>({0}),
+ };
+
+ // The buffer value of the constant second operand. The logical value is always 1.0f.
+ CppType bufferValue;
+ // The scale of the first and second operand.
+ float scale1, scale2;
+ if (operandType == TestOperandType::TENSOR_FLOAT32) {
+ bufferValue = 1.0f;
+ scale1 = 0.0f;
+ scale2 = 0.0f;
+ } else if (op == TestOperationType::ADD) {
+ bufferValue = 1;
+ scale1 = 1.0f;
+ scale2 = 1.0f;
+ } else {
+ // To satisfy the constraint on quant8 MUL: input0.scale * input1.scale < output.scale,
+ // set input1 to have scale = 0.5f and bufferValue = 2, i.e. 1.0f in floating point.
+ bufferValue = 2;
+ scale1 = 1.0f;
+ scale2 = 0.5f;
+ }
+
+ for (uint32_t i = 0; i < len; i++) {
+ const uint32_t firstInputIndex = i * 2 + 1;
+ const uint32_t secondInputIndex = firstInputIndex + 1;
+ const uint32_t outputIndex = secondInputIndex + 1;
+
+ // The first operation input.
+ operands[firstInputIndex] = {
+ .type = operandType,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = scale1,
+ .zeroPoint = 0,
+ .lifetime = (i == 0 ? TestOperandLifeTime::MODEL_INPUT
+ : TestOperandLifeTime::TEMPORARY_VARIABLE),
+ .data = (i == 0 ? TestBuffer::createFromVector<CppType>({1}) : TestBuffer()),
+ };
+
+ // The second operation input, value = 1.
+ operands[secondInputIndex] = {
+ .type = operandType,
+ .dimensions = {1},
+ .numberOfConsumers = 1,
+ .scale = scale2,
+ .zeroPoint = 0,
+ .lifetime = TestOperandLifeTime::CONSTANT_COPY,
+ .data = TestBuffer::createFromVector<CppType>({bufferValue}),
+ };
+
+ // The operation. All operations share the same activation scalar.
+ // The output operand is created as an input in the next iteration of the loop, in the case
+ // of all but the last member of the chain; and after the loop as a model output, in the
+ // case of the last member of the chain.
+ operations[i] = {
+ .type = op,
+ .inputs = {firstInputIndex, secondInputIndex, /*activation scalar*/ 0},
+ .outputs = {outputIndex},
+ };
+ }
+
+ // For TestOperationType::ADD, output = 1 + 1 * len = len + 1
+ // For TestOperationType::MUL, output = 1 * 1 ^ len = 1
+ CppType outputResult = static_cast<CppType>(op == TestOperationType::ADD ? len + 1u : 1u);
+
+ // The model output.
+ operands.back() = {
+ .type = operandType,
+ .dimensions = {1},
+ .numberOfConsumers = 0,
+ .scale = scale1,
+ .zeroPoint = 0,
+ .lifetime = TestOperandLifeTime::MODEL_OUTPUT,
+ .data = TestBuffer::createFromVector<CppType>({outputResult}),
+ };
+
+ return {
+ .operands = std::move(operands),
+ .operations = std::move(operations),
+ .inputIndexes = {1},
+ .outputIndexes = {len * 2 + 1},
+ .isRelaxed = false,
+ };
+}
+
+} // namespace
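To make the expected-output formulas in createLargeTestModelImpl concrete: the chain starts from the model input {1} and applies len operations whose second operand is logically 1, so an ADD chain yields len + 1 while a MUL chain stays at 1; the quant8 MUL variant keeps the logical value 1.0f by pairing scale 0.5f with buffer value 2. A tiny standalone check of that arithmetic (not part of the test code):

#include <cassert>
#include <cstdint>

// Mirrors the formulas in the comments above.
uint32_t expectedChainOutput(bool isAdd, uint32_t len) {
    uint32_t value = 1;  // The model input is the tensor {1}.
    for (uint32_t i = 0; i < len; ++i) {
        value = isAdd ? value + 1 : value * 1;  // Second operand is logically 1.
    }
    return value;
}

int main() {
    assert(expectedChainOutput(/*isAdd=*/true, 100) == 101);  // ADD: len + 1
    assert(expectedChainOutput(/*isAdd=*/false, 100) == 1);   // MUL: always 1
    // Quant8 MUL constraint from the comment: input0.scale * input1.scale < output.scale.
    assert(1.0f * 0.5f < 1.0f);
    return 0;
}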
+
+// Tag for the compilation caching tests.
+class CompilationCachingTestBase : public testing::Test {
+ protected:
+ CompilationCachingTestBase(sp<IDevice> device, OperandType type)
+ : kDevice(std::move(device)), kOperandType(type) {}
+
+ void SetUp() override {
+ testing::Test::SetUp();
+ ASSERT_NE(kDevice.get(), nullptr);
+
+        // Create cache directory. The cache directory and a temporary cache file are always created
+ // to test the behavior of prepareModelFromCache, even when caching is not supported.
+ char cacheDirTemp[] = "/data/local/tmp/TestCompilationCachingXXXXXX";
+ char* cacheDir = mkdtemp(cacheDirTemp);
+ ASSERT_NE(cacheDir, nullptr);
+ mCacheDir = cacheDir;
+ mCacheDir.push_back('/');
+
+ Return<void> ret = kDevice->getNumberOfCacheFilesNeeded(
+ [this](ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
+ EXPECT_EQ(ErrorStatus::NONE, status);
+ mNumModelCache = numModelCache;
+ mNumDataCache = numDataCache;
+ });
+ EXPECT_TRUE(ret.isOk());
+ mIsCachingSupported = mNumModelCache > 0 || mNumDataCache > 0;
+
+ // Create empty cache files.
+ mTmpCache = mCacheDir + "tmp";
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ mModelCache.push_back({mCacheDir + "model" + std::to_string(i)});
+ }
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ mDataCache.push_back({mCacheDir + "data" + std::to_string(i)});
+ }
+ // Dummy handles, use AccessMode::WRITE_ONLY for createCacheHandles to create files.
+ hidl_vec<hidl_handle> modelHandle, dataHandle, tmpHandle;
+ createCacheHandles(mModelCache, AccessMode::WRITE_ONLY, &modelHandle);
+ createCacheHandles(mDataCache, AccessMode::WRITE_ONLY, &dataHandle);
+ createCacheHandles({{mTmpCache}}, AccessMode::WRITE_ONLY, &tmpHandle);
+
+ if (!mIsCachingSupported) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service does not "
+ "support compilation caching.";
+ std::cout << "[ ] Early termination of test because vendor service does not "
+ "support compilation caching."
+ << std::endl;
+ }
+ }
+
+ void TearDown() override {
+ // If the test passes, remove the tmp directory. Otherwise, keep it for debugging purposes.
+ if (!testing::Test::HasFailure()) {
+ // Recursively remove the cache directory specified by mCacheDir.
+ auto callback = [](const char* entry, const struct stat*, int, struct FTW*) {
+ return remove(entry);
+ };
+ nftw(mCacheDir.c_str(), callback, 128, FTW_DEPTH | FTW_MOUNT | FTW_PHYS);
+ }
+ testing::Test::TearDown();
+ }
+
+    // Model and example creators. Depending on kOperandType, the following methods return
+    // either the float32 model/examples or the quant8 variant.
+ TestModel createTestModel() {
+ if (kOperandType == OperandType::TENSOR_FLOAT32) {
+ return float32_model::get_test_model();
+ } else {
+ return quant8_model::get_test_model();
+ }
+ }
+
+ TestModel createLargeTestModel(OperationType op, uint32_t len) {
+ if (kOperandType == OperandType::TENSOR_FLOAT32) {
+ return createLargeTestModelImpl<float, TestOperandType::TENSOR_FLOAT32>(
+ static_cast<TestOperationType>(op), len);
+ } else {
+ return createLargeTestModelImpl<uint8_t, TestOperandType::TENSOR_QUANT8_ASYMM>(
+ static_cast<TestOperationType>(op), len);
+ }
+ }
+
+ // See if the service can handle the model.
+ bool isModelFullySupported(const Model& model) {
+ bool fullySupportsModel = false;
+ Return<void> supportedCall = kDevice->getSupportedOperations_1_3(
+ model,
+ [&fullySupportsModel, &model](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_EQ(supported.size(), model.operations.size());
+ fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+ [](bool valid) { return valid; });
+ });
+ EXPECT_TRUE(supportedCall.isOk());
+ return fullySupportsModel;
+ }
+
+ void saveModelToCache(const Model& model, const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
+ sp<IPreparedModel>* preparedModel = nullptr) {
+ if (preparedModel != nullptr) *preparedModel = nullptr;
+
+ // Launch prepare model.
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
+ Return<ErrorStatus> prepareLaunchStatus =
+ kDevice->prepareModel_1_3(model, ExecutionPreference::FAST_SINGLE_ANSWER,
+ modelCache, dataCache, cacheToken, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(static_cast<ErrorStatus>(prepareLaunchStatus), ErrorStatus::NONE);
+
+ // Retrieve prepared model.
+ preparedModelCallback->wait();
+ ASSERT_EQ(preparedModelCallback->getStatus(), ErrorStatus::NONE);
+ if (preparedModel != nullptr) {
+ *preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
+ .withDefault(nullptr);
+ }
+ }
+
+ bool checkEarlyTermination(ErrorStatus status) {
+ if (status == ErrorStatus::GENERAL_FAILURE) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "save the prepared model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "save the prepared model that it does not support."
+ << std::endl;
+ return true;
+ }
+ return false;
+ }
+
+ bool checkEarlyTermination(const Model& model) {
+ if (!isModelFullySupported(model)) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "prepare model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ return true;
+ }
+ return false;
+ }
+
+ void prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
+ const hidl_vec<hidl_handle>& dataCache,
+ sp<IPreparedModel>* preparedModel, ErrorStatus* status) {
+ // Launch prepare model from cache.
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ hidl_array<uint8_t, sizeof(mToken)> cacheToken(mToken);
+ Return<ErrorStatus> prepareLaunchStatus = kDevice->prepareModelFromCache(
+ modelCache, dataCache, cacheToken, preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ if (static_cast<ErrorStatus>(prepareLaunchStatus) != ErrorStatus::NONE) {
+ *preparedModel = nullptr;
+ *status = static_cast<ErrorStatus>(prepareLaunchStatus);
+ return;
+ }
+
+ // Retrieve prepared model.
+ preparedModelCallback->wait();
+ *status = preparedModelCallback->getStatus();
+ *preparedModel = IPreparedModel::castFrom(preparedModelCallback->getPreparedModel())
+ .withDefault(nullptr);
+ }
+
+ // Absolute path to the temporary cache directory.
+ std::string mCacheDir;
+
+ // Groups of file paths for model and data cache in the tmp cache directory, initialized with
+ // outer_size = mNum{Model|Data}Cache, inner_size = 1. The outer vector corresponds to handles
+ // and the inner vector is for fds held by each handle.
+ std::vector<std::vector<std::string>> mModelCache;
+ std::vector<std::vector<std::string>> mDataCache;
+
+ // A separate temporary file path in the tmp cache directory.
+ std::string mTmpCache;
+
+ uint8_t mToken[static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)] = {};
+ uint32_t mNumModelCache;
+ uint32_t mNumDataCache;
+    bool mIsCachingSupported;
+
+ const sp<IDevice> kDevice;
+ // The primary data type of the testModel.
+ const OperandType kOperandType;
+};
+
+using CompilationCachingTestParam = std::tuple<NamedDevice, OperandType>;
+
+// A parameterized fixture of CompilationCachingTestBase. Every test will run twice, with the first
+// pass running with float32 models and the second pass running with quant8 models.
+class CompilationCachingTest : public CompilationCachingTestBase,
+ public testing::WithParamInterface<CompilationCachingTestParam> {
+ protected:
+ CompilationCachingTest()
+ : CompilationCachingTestBase(getData(std::get<NamedDevice>(GetParam())),
+ std::get<OperandType>(GetParam())) {}
+};
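CompilationCachingTest is parameterized over (NamedDevice, OperandType), so every test body runs once per registered device with float32 models and once with quant8 models. The actual INSTANTIATE_TEST_SUITE_P call is outside the hunk shown here; a hedged sketch of what such an instantiation typically looks like, where getNamedDevices and the instance name are assumptions:

// Illustrative instantiation only; the real one lives elsewhere in this file.
static const auto kOperandTypeChoices =
        testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);

INSTANTIATE_TEST_SUITE_P(TestCompilationCaching, CompilationCachingTest,
                         testing::Combine(testing::ValuesIn(getNamedDevices()),  // assumed helper
                                          kOperandTypeChoices));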
+
+TEST_P(CompilationCachingTest, CacheSavingAndRetrieval) {
+ // Create test HIDL model and compile.
+ const TestModel& testModel = createTestModel();
+ const Model model = createModel(testModel);
+ if (checkEarlyTermination(model)) return;
+ sp<IPreparedModel> preparedModel = nullptr;
+
+ // Save the compilation to cache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(model, modelCache, dataCache);
+ }
+
+ // Retrieve preparedModel from cache.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (!mIsCachingSupported) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ return;
+ } else if (checkEarlyTermination(status)) {
+ ASSERT_EQ(preparedModel, nullptr);
+ return;
+ } else {
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ ASSERT_NE(preparedModel, nullptr);
+ }
+ }
+
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+}
+
+TEST_P(CompilationCachingTest, CacheSavingAndRetrievalNonZeroOffset) {
+ // Create test HIDL model and compile.
+ const TestModel& testModel = createTestModel();
+ const Model model = createModel(testModel);
+ if (checkEarlyTermination(model)) return;
+ sp<IPreparedModel> preparedModel = nullptr;
+
+ // Save the compilation to cache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ uint8_t dummyBytes[] = {0, 0};
+        // Write a couple of dummy bytes to the cache.
+ // The driver should be able to handle non-empty cache and non-zero fd offset.
+ for (uint32_t i = 0; i < modelCache.size(); i++) {
+ ASSERT_EQ(write(modelCache[i].getNativeHandle()->data[0], &dummyBytes,
+ sizeof(dummyBytes)),
+ sizeof(dummyBytes));
+ }
+ for (uint32_t i = 0; i < dataCache.size(); i++) {
+ ASSERT_EQ(
+ write(dataCache[i].getNativeHandle()->data[0], &dummyBytes, sizeof(dummyBytes)),
+ sizeof(dummyBytes));
+ }
+ saveModelToCache(model, modelCache, dataCache);
+ }
+
+ // Retrieve preparedModel from cache.
+ {
+ preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ uint8_t dummyByte = 0;
+ // Advance the offset of each handle by one byte.
+ // The driver should be able to handle non-zero fd offset.
+ for (uint32_t i = 0; i < modelCache.size(); i++) {
+ ASSERT_GE(read(modelCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0);
+ }
+ for (uint32_t i = 0; i < dataCache.size(); i++) {
+ ASSERT_GE(read(dataCache[i].getNativeHandle()->data[0], &dummyByte, 1), 0);
+ }
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (!mIsCachingSupported) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ return;
+ } else if (checkEarlyTermination(status)) {
+ ASSERT_EQ(preparedModel, nullptr);
+ return;
+ } else {
+ ASSERT_EQ(status, ErrorStatus::NONE);
+ ASSERT_NE(preparedModel, nullptr);
+ }
+ }
+
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+}
+
+TEST_P(CompilationCachingTest, SaveToCacheInvalidNumCache) {
+ // Create test HIDL model and compile.
+ const TestModel& testModel = createTestModel();
+ const Model model = createModel(testModel);
+ if (checkEarlyTermination(model)) return;
+
+ // Test with number of model cache files greater than mNumModelCache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an additional cache file for model cache.
+ mModelCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of model cache files smaller than mNumModelCache.
+ if (mModelCache.size() > 0) {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pop out the last cache file.
+ auto tmp = mModelCache.back();
+ mModelCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files greater than mNumDataCache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an additional cache file for data cache.
+ mDataCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files smaller than mNumDataCache.
+ if (mDataCache.size() > 0) {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pop out the last cache file.
+ auto tmp = mDataCache.back();
+ mDataCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumCache) {
+ // Create test HIDL model and compile.
+ const TestModel& testModel = createTestModel();
+ const Model model = createModel(testModel);
+ if (checkEarlyTermination(model)) return;
+
+ // Save the compilation to cache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(model, modelCache, dataCache);
+ }
+
+ // Test with number of model cache files greater than mNumModelCache.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mModelCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of model cache files smaller than mNumModelCache.
+ if (mModelCache.size() > 0) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mModelCache.back();
+ mModelCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache.push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files greater than mNumDataCache.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mDataCache.push_back({mTmpCache});
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Test with number of data cache files smaller than mNumDataCache.
+ if (mDataCache.size() > 0) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mDataCache.back();
+ mDataCache.pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache.push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+TEST_P(CompilationCachingTest, SaveToCacheInvalidNumFd) {
+ // Create test HIDL model and compile.
+ const TestModel& testModel = createTestModel();
+ const Model model = createModel(testModel);
+ if (checkEarlyTermination(model)) return;
+
+ // Go through each handle in model cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ mModelCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in model cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ auto tmp = mModelCache[i].back();
+ mModelCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ mDataCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].pop_back();
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ // Pass an invalid number of fds for handle i.
+ auto tmp = mDataCache[i].back();
+ mDataCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].push_back(tmp);
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidNumFd) {
+ // Create test HIDL model and compile.
+ const TestModel& testModel = createTestModel();
+ const Model model = createModel(testModel);
+ if (checkEarlyTermination(model)) return;
+
+ // Save the compilation to cache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(model, modelCache, dataCache);
+ }
+
+ // Go through each handle in model cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mModelCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in model cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mModelCache[i].back();
+ mModelCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mModelCache[i].push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd greater than 1.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ mDataCache[i].push_back(mTmpCache);
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].pop_back();
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with NumFd equal to 0.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ auto tmp = mDataCache[i].back();
+ mDataCache[i].pop_back();
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ mDataCache[i].push_back(tmp);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::GENERAL_FAILURE) {
+ ASSERT_EQ(status, ErrorStatus::INVALID_ARGUMENT);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+TEST_P(CompilationCachingTest, SaveToCacheInvalidAccessMode) {
+ // Create test HIDL model and compile.
+ const TestModel& testModel = createTestModel();
+ const Model model = createModel(testModel);
+ if (checkEarlyTermination(model)) return;
+ std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
+ std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
+
+ // Go through each handle in model cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ modelCacheMode[i] = AccessMode::READ_ONLY;
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ modelCacheMode[i] = AccessMode::READ_WRITE;
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ dataCacheMode[i] = AccessMode::READ_ONLY;
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ dataCacheMode[i] = AccessMode::READ_WRITE;
+ sp<IPreparedModel> preparedModel = nullptr;
+ saveModelToCache(model, modelCache, dataCache, &preparedModel);
+ ASSERT_NE(preparedModel, nullptr);
+ // Execute and verify results.
+ EvaluatePreparedModel(preparedModel, testModel,
+ /*testDynamicOutputShape=*/false);
+ // Check if prepareModelFromCache fails.
+ preparedModel = nullptr;
+ ErrorStatus status;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ if (status != ErrorStatus::INVALID_ARGUMENT) {
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ }
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+TEST_P(CompilationCachingTest, PrepareModelFromCacheInvalidAccessMode) {
+ // Create test HIDL model and compile.
+ const TestModel& testModel = createTestModel();
+ const Model model = createModel(testModel);
+ if (checkEarlyTermination(model)) return;
+ std::vector<AccessMode> modelCacheMode(mNumModelCache, AccessMode::READ_WRITE);
+ std::vector<AccessMode> dataCacheMode(mNumDataCache, AccessMode::READ_WRITE);
+
+ // Save the compilation to cache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(model, modelCache, dataCache);
+ }
+
+ // Go through each handle in model cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ modelCacheMode[i] = AccessMode::WRITE_ONLY;
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ modelCacheMode[i] = AccessMode::READ_WRITE;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+
+ // Go through each handle in data cache, test with invalid access mode.
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ dataCacheMode[i] = AccessMode::WRITE_ONLY;
+ createCacheHandles(mModelCache, modelCacheMode, &modelCache);
+ createCacheHandles(mDataCache, dataCacheMode, &dataCache);
+ dataCacheMode[i] = AccessMode::READ_WRITE;
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+// Copy file contents between file groups.
+// The outer vector corresponds to handles and the inner vector is for fds held by each handle.
+// The outer vector sizes must match and the inner vectors must have size = 1.
+static void copyCacheFiles(const std::vector<std::vector<std::string>>& from,
+ const std::vector<std::vector<std::string>>& to) {
+ constexpr size_t kBufferSize = 1000000;
+ uint8_t buffer[kBufferSize];
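+ // Note: "buffer" is a ~1 MB stack allocation; the read/write loop below copies in chunks of
+ // this size.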
+
+ ASSERT_EQ(from.size(), to.size());
+ for (uint32_t i = 0; i < from.size(); i++) {
+ ASSERT_EQ(from[i].size(), 1u);
+ ASSERT_EQ(to[i].size(), 1u);
+ int fromFd = open(from[i][0].c_str(), O_RDONLY);
+ int toFd = open(to[i][0].c_str(), O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR);
+ ASSERT_GE(fromFd, 0);
+ ASSERT_GE(toFd, 0);
+
+ ssize_t readBytes;
+ while ((readBytes = read(fromFd, &buffer, kBufferSize)) > 0) {
+ ASSERT_EQ(write(toFd, &buffer, readBytes), readBytes);
+ }
+ ASSERT_GE(readBytes, 0);
+
+ close(fromFd);
+ close(toFd);
+ }
+}
+
+// Number of operations in the large test model.
+constexpr uint32_t kLargeModelSize = 100;
+constexpr uint32_t kNumIterationsTOCTOU = 100;
+
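+// TOCTOU (time-of-check to time-of-use) tests: a second thread overwrites the cache files with
+// the content of a different model while the driver is saving to or preparing from the cache.
+// The driver may reject the corrupted cache or succeed, but it must not crash, and if it does
+// succeed it must still compute the correct results for the model that was requested.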
+TEST_P(CompilationCachingTest, SaveToCache_TOCTOU) {
+ if (!mIsCachingSupported) return;
+
+ // Create test models and check if fully supported by the service.
+ const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+ const Model modelMul = createModel(testModelMul);
+ if (checkEarlyTermination(modelMul)) return;
+ const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+ const Model modelAdd = createModel(testModelAdd);
+ if (checkEarlyTermination(modelAdd)) return;
+
+ // Save the modelMul compilation to cache.
+ auto modelCacheMul = mModelCache;
+ for (auto& cache : modelCacheMul) {
+ cache[0].append("_mul");
+ }
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(modelMul, modelCache, dataCache);
+ }
+
+ // Use a different token for modelAdd.
+ mToken[0]++;
+
+ // This test is probabilistic, so we run it multiple times.
+ for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
+ // Save the modelAdd compilation to cache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+
+ // Spawn a thread to copy the cache content concurrently while saving to cache.
+ std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
+ saveModelToCache(modelAdd, modelCache, dataCache);
+ thread.join();
+ }
+
+ // Retrieve preparedModel from cache.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+
+ // The preparation may fail or succeed, but must not crash. If it succeeds, the
+ // prepared model must execute, produce the correct results, and not crash.
+ if (status != ErrorStatus::NONE) {
+ ASSERT_EQ(preparedModel, nullptr);
+ } else {
+ ASSERT_NE(preparedModel, nullptr);
+ EvaluatePreparedModel(preparedModel, testModelAdd,
+ /*testDynamicOutputShape=*/false);
+ }
+ }
+ }
+}
+
+TEST_P(CompilationCachingTest, PrepareFromCache_TOCTOU) {
+ if (!mIsCachingSupported) return;
+
+ // Create test models and check if fully supported by the service.
+ const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+ const Model modelMul = createModel(testModelMul);
+ if (checkEarlyTermination(modelMul)) return;
+ const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+ const Model modelAdd = createModel(testModelAdd);
+ if (checkEarlyTermination(modelAdd)) return;
+
+ // Save the modelMul compilation to cache.
+ auto modelCacheMul = mModelCache;
+ for (auto& cache : modelCacheMul) {
+ cache[0].append("_mul");
+ }
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(modelMul, modelCache, dataCache);
+ }
+
+ // Use a different token for modelAdd.
+ mToken[0]++;
+
+ // This test is probabilistic, so we run it multiple times.
+ for (uint32_t i = 0; i < kNumIterationsTOCTOU; i++) {
+ // Save the modelAdd compilation to cache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(modelAdd, modelCache, dataCache);
+ }
+
+ // Retrieve preparedModel from cache.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+
+ // Spawn a thread to copy the cache content concurrently while preparing from cache.
+ std::thread thread(copyCacheFiles, std::cref(modelCacheMul), std::cref(mModelCache));
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ thread.join();
+
+ // The preparation may fail or succeed, but must not crash. If the preparation succeeds,
+ // the prepared model must be executed with the correct result and not crash.
+ if (status != ErrorStatus::NONE) {
+ ASSERT_EQ(preparedModel, nullptr);
+ } else {
+ ASSERT_NE(preparedModel, nullptr);
+ EvaluatePreparedModel(preparedModel, testModelAdd,
+ /*testDynamicOutputShape=*/false);
+ }
+ }
+ }
+}
+
+TEST_P(CompilationCachingTest, ReplaceSecuritySensitiveCache) {
+ if (!mIsCachingSupported) return;
+
+ // Create test models and check if fully supported by the service.
+ const TestModel testModelMul = createLargeTestModel(OperationType::MUL, kLargeModelSize);
+ const Model modelMul = createModel(testModelMul);
+ if (checkEarlyTermination(modelMul)) return;
+ const TestModel testModelAdd = createLargeTestModel(OperationType::ADD, kLargeModelSize);
+ const Model modelAdd = createModel(testModelAdd);
+ if (checkEarlyTermination(modelAdd)) return;
+
+ // Save the modelMul compilation to cache.
+ auto modelCacheMul = mModelCache;
+ for (auto& cache : modelCacheMul) {
+ cache[0].append("_mul");
+ }
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(modelCacheMul, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(modelMul, modelCache, dataCache);
+ }
+
+ // Use a different token for modelAdd.
+ mToken[0]++;
+
+ // Save the modelAdd compilation to cache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(modelAdd, modelCache, dataCache);
+ }
+
+ // Replace the model cache of modelAdd with modelMul.
+ copyCacheFiles(modelCacheMul, mModelCache);
+
+ // Retrieve the preparedModel from cache, expect failure.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ }
+}
+
+static const auto kNamedDeviceChoices = testing::ValuesIn(getNamedDevices());
+static const auto kOperandTypeChoices =
+ testing::Values(OperandType::TENSOR_FLOAT32, OperandType::TENSOR_QUANT8_ASYMM);
+
+std::string printCompilationCachingTest(
+ const testing::TestParamInfo<CompilationCachingTestParam>& info) {
+ const auto& [namedDevice, operandType] = info.param;
+ const std::string type = (operandType == OperandType::TENSOR_FLOAT32 ? "float32" : "quant8");
+ return gtestCompliantName(getName(namedDevice) + "_" + type);
+}
+
+INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingTest,
+ testing::Combine(kNamedDeviceChoices, kOperandTypeChoices),
+ printCompilationCachingTest);
+
+using CompilationCachingSecurityTestParam = std::tuple<NamedDevice, OperandType, uint32_t>;
+
+class CompilationCachingSecurityTest
+ : public CompilationCachingTestBase,
+ public testing::WithParamInterface<CompilationCachingSecurityTestParam> {
+ protected:
+ CompilationCachingSecurityTest()
+ : CompilationCachingTestBase(getData(std::get<NamedDevice>(GetParam())),
+ std::get<OperandType>(GetParam())) {}
+
+ void SetUp() {
+ CompilationCachingTestBase::SetUp();
+ generator.seed(kSeed);
+ }
+
+ // Get a random integer within a closed range [lower, upper].
+ template <typename T>
+ T getRandomInt(T lower, T upper) {
+ std::uniform_int_distribution<T> dis(lower, upper);
+ return dis(generator);
+ }
+
+ // Randomly flip a single bit of the cache entry.
+ void flipOneBitOfCache(const std::string& filename, bool* skip) {
+ FILE* pFile = fopen(filename.c_str(), "r+");
+ ASSERT_NE(pFile, nullptr);
+ ASSERT_EQ(fseek(pFile, 0, SEEK_END), 0);
+ long int fileSize = ftell(pFile);
+ if (fileSize == 0) {
+ fclose(pFile);
+ *skip = true;
+ return;
+ }
+ ASSERT_EQ(fseek(pFile, getRandomInt(0l, fileSize - 1), SEEK_SET), 0);
+ int readByte = fgetc(pFile);
+ ASSERT_NE(readByte, EOF);
+ ASSERT_EQ(fseek(pFile, -1, SEEK_CUR), 0);
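+ // Write the byte back with one randomly chosen bit inverted. For example, if the byte read
+ // was 0x5A (0b0101'1010) and the random bit index is 3, then 0x52 (0b0101'0010) is written,
+ // corrupting exactly one bit of the cache entry.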
+ ASSERT_NE(fputc(static_cast<uint8_t>(readByte) ^ (1U << getRandomInt(0, 7)), pFile), EOF);
+ fclose(pFile);
+ *skip = false;
+ }
+
+ // Randomly append bytes to the cache entry.
+ void appendBytesToCache(const std::string& filename, bool* skip) {
+ FILE* pFile = fopen(filename.c_str(), "a");
+ ASSERT_NE(pFile, nullptr);
+ uint32_t appendLength = getRandomInt(1, 256);
+ for (uint32_t i = 0; i < appendLength; i++) {
+ ASSERT_NE(fputc(getRandomInt<uint8_t>(0, 255), pFile), EOF);
+ }
+ fclose(pFile);
+ *skip = false;
+ }
+
+ enum class ExpectedResult { GENERAL_FAILURE, NOT_CRASH };
+
+ // Test if the driver behaves as expected when given corrupted cache or token.
+ // The modifier will be invoked after save to cache but before prepare from cache.
+ // The modifier accepts one pointer argument "skip" as an output value, indicating
+ // whether the test should be skipped.
+ void testCorruptedCache(ExpectedResult expected, std::function<void(bool*)> modifier) {
+ const TestModel& testModel = createTestModel();
+ const Model model = createModel(testModel);
+ if (checkEarlyTermination(model)) return;
+
+ // Save the compilation to cache.
+ {
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ saveModelToCache(model, modelCache, dataCache);
+ }
+
+ bool skip = false;
+ modifier(&skip);
+ if (skip) return;
+
+ // Retrieve preparedModel from cache.
+ {
+ sp<IPreparedModel> preparedModel = nullptr;
+ ErrorStatus status;
+ hidl_vec<hidl_handle> modelCache, dataCache;
+ createCacheHandles(mModelCache, AccessMode::READ_WRITE, &modelCache);
+ createCacheHandles(mDataCache, AccessMode::READ_WRITE, &dataCache);
+ prepareModelFromCache(modelCache, dataCache, &preparedModel, &status);
+
+ switch (expected) {
+ case ExpectedResult::GENERAL_FAILURE:
+ ASSERT_EQ(status, ErrorStatus::GENERAL_FAILURE);
+ ASSERT_EQ(preparedModel, nullptr);
+ break;
+ case ExpectedResult::NOT_CRASH:
+ ASSERT_EQ(preparedModel == nullptr, status != ErrorStatus::NONE);
+ break;
+ default:
+ FAIL();
+ }
+ }
+ }
+
+ const uint32_t kSeed = std::get<uint32_t>(GetParam());
+ std::mt19937 generator;
+};
+
+TEST_P(CompilationCachingSecurityTest, CorruptedModelCache) {
+ if (!mIsCachingSupported) return;
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ testCorruptedCache(ExpectedResult::GENERAL_FAILURE,
+ [this, i](bool* skip) { flipOneBitOfCache(mModelCache[i][0], skip); });
+ }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongLengthModelCache) {
+ if (!mIsCachingSupported) return;
+ for (uint32_t i = 0; i < mNumModelCache; i++) {
+ testCorruptedCache(ExpectedResult::GENERAL_FAILURE,
+ [this, i](bool* skip) { appendBytesToCache(mModelCache[i][0], skip); });
+ }
+}
+
+TEST_P(CompilationCachingSecurityTest, CorruptedDataCache) {
+ if (!mIsCachingSupported) return;
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ testCorruptedCache(ExpectedResult::NOT_CRASH,
+ [this, i](bool* skip) { flipOneBitOfCache(mDataCache[i][0], skip); });
+ }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongLengthDataCache) {
+ if (!mIsCachingSupported) return;
+ for (uint32_t i = 0; i < mNumDataCache; i++) {
+ testCorruptedCache(ExpectedResult::NOT_CRASH,
+ [this, i](bool* skip) { appendBytesToCache(mDataCache[i][0], skip); });
+ }
+}
+
+TEST_P(CompilationCachingSecurityTest, WrongToken) {
+ if (!mIsCachingSupported) return;
+ testCorruptedCache(ExpectedResult::GENERAL_FAILURE, [this](bool* skip) {
+ // Randomly flip a single bit in mToken.
+ uint32_t ind =
+ getRandomInt(0u, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN) - 1);
+ mToken[ind] ^= (1U << getRandomInt(0, 7));
+ *skip = false;
+ });
+}
+
+std::string printCompilationCachingSecurityTest(
+ const testing::TestParamInfo<CompilationCachingSecurityTestParam>& info) {
+ const auto& [namedDevice, operandType, seed] = info.param;
+ const std::string type = (operandType == OperandType::TENSOR_FLOAT32 ? "float32" : "quant8");
+ return gtestCompliantName(getName(namedDevice) + "_" + type + "_" + std::to_string(seed));
+}
+
+INSTANTIATE_TEST_CASE_P(TestCompilationCaching, CompilationCachingSecurityTest,
+ testing::Combine(kNamedDeviceChoices, kOperandTypeChoices,
+ testing::Range(0U, 10U)),
+ printCompilationCachingSecurityTest);
+
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
new file mode 100644
index 0000000..8a7ed24
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GeneratedTestHarness.h"
+
+#include <android-base/logging.h>
+#include <android/hardware/neuralnetworks/1.0/IDevice.h>
+#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IDevice.h>
+#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <android/hidl/allocator/1.0/IAllocator.h>
+#include <android/hidl/memory/1.0/IMemory.h>
+#include <hidlmemory/mapping.h>
+
+#include <gtest/gtest.h>
+#include <algorithm>
+#include <chrono>
+#include <iostream>
+#include <numeric>
+
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using namespace test_helper;
+using hidl::memory::V1_0::IMemory;
+using V1_0::DataLocation;
+using V1_0::ErrorStatus;
+using V1_0::OperandLifeTime;
+using V1_0::Request;
+using V1_1::ExecutionPreference;
+using V1_2::Constant;
+using V1_2::IPreparedModel;
+using V1_2::MeasureTiming;
+using V1_2::OperationType;
+using V1_2::OutputShape;
+using V1_2::SymmPerChannelQuantParams;
+using V1_2::Timing;
+using V1_2::implementation::ExecutionCallback;
+using V1_2::implementation::PreparedModelCallback;
+using HidlToken = hidl_array<uint8_t, static_cast<uint32_t>(Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
+enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT };
+
+Model createModel(const TestModel& testModel) {
+ // Model operands.
+ hidl_vec<Operand> operands(testModel.operands.size());
+ size_t constCopySize = 0, constRefSize = 0;
+ for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+ const auto& op = testModel.operands[i];
+
+ DataLocation loc = {};
+ if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+ loc = {.poolIndex = 0,
+ .offset = static_cast<uint32_t>(constCopySize),
+ .length = static_cast<uint32_t>(op.data.size())};
+ constCopySize += op.data.alignedSize();
+ } else if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+ loc = {.poolIndex = 0,
+ .offset = static_cast<uint32_t>(constRefSize),
+ .length = static_cast<uint32_t>(op.data.size())};
+ constRefSize += op.data.alignedSize();
+ }
+
+ Operand::ExtraParams extraParams;
+ if (op.type == TestOperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
+ extraParams.channelQuant(SymmPerChannelQuantParams{
+ .scales = op.channelQuant.scales, .channelDim = op.channelQuant.channelDim});
+ }
+
+ operands[i] = {.type = static_cast<OperandType>(op.type),
+ .dimensions = op.dimensions,
+ .numberOfConsumers = op.numberOfConsumers,
+ .scale = op.scale,
+ .zeroPoint = op.zeroPoint,
+ .lifetime = static_cast<OperandLifeTime>(op.lifetime),
+ .location = loc,
+ .extraParams = std::move(extraParams)};
+ }
+
+ // Model operations.
+ hidl_vec<Operation> operations(testModel.operations.size());
+ std::transform(testModel.operations.begin(), testModel.operations.end(), operations.begin(),
+ [](const TestOperation& op) -> Operation {
+ return {.type = static_cast<OperationType>(op.type),
+ .inputs = op.inputs,
+ .outputs = op.outputs};
+ });
+
+ // Constant copies.
+ hidl_vec<uint8_t> operandValues(constCopySize);
+ for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+ const auto& op = testModel.operands[i];
+ if (op.lifetime == TestOperandLifeTime::CONSTANT_COPY) {
+ const uint8_t* begin = op.data.get<uint8_t>();
+ const uint8_t* end = begin + op.data.size();
+ std::copy(begin, end, operandValues.data() + operands[i].location.offset);
+ }
+ }
+
+ // Shared memory.
+ hidl_vec<hidl_memory> pools = {};
+ if (constRefSize > 0) {
+ hidl_vec_push_back(&pools, nn::allocateSharedMemory(constRefSize));
+ CHECK_NE(pools[0].size(), 0u);
+
+ // load data
+ sp<IMemory> mappedMemory = mapMemory(pools[0]);
+ CHECK(mappedMemory.get() != nullptr);
+ uint8_t* mappedPtr =
+ reinterpret_cast<uint8_t*>(static_cast<void*>(mappedMemory->getPointer()));
+ CHECK(mappedPtr != nullptr);
+
+ for (uint32_t i = 0; i < testModel.operands.size(); i++) {
+ const auto& op = testModel.operands[i];
+ if (op.lifetime == TestOperandLifeTime::CONSTANT_REFERENCE) {
+ const uint8_t* begin = op.data.get<uint8_t>();
+ const uint8_t* end = begin + op.data.size();
+ std::copy(begin, end, mappedPtr + operands[i].location.offset);
+ }
+ }
+ }
+
+ return {.operands = std::move(operands),
+ .operations = std::move(operations),
+ .inputIndexes = testModel.inputIndexes,
+ .outputIndexes = testModel.outputIndexes,
+ .operandValues = std::move(operandValues),
+ .pools = std::move(pools),
+ .relaxComputationFloat32toFloat16 = testModel.isRelaxed};
+}
+
+static bool isOutputSizeGreaterThanOne(const TestModel& testModel, uint32_t index) {
+ const auto byteSize = testModel.operands[testModel.outputIndexes[index]].data.size();
+ return byteSize > 1u;
+}
+
+static void makeOutputInsufficientSize(uint32_t outputIndex, Request* request) {
+ auto& length = request->outputs[outputIndex].location.length;
+ ASSERT_GT(length, 1u);
+ length -= 1u;
+}
+
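+// In the NN HAL, a tensor dimension value of 0 means that dimension is unknown, so zeroing all
+// output dimensions turns the model outputs into fully unspecified shapes for the dynamic
+// output shape tests.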
+static void makeOutputDimensionsUnspecified(Model* model) {
+ for (auto i : model->outputIndexes) {
+ auto& dims = model->operands[i].dimensions;
+ std::fill(dims.begin(), dims.end(), 0);
+ }
+}
+
+static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
+ const Request& request, MeasureTiming measure,
+ sp<ExecutionCallback>& callback) {
+ return preparedModel->execute_1_2(request, measure, callback);
+}
+static Return<ErrorStatus> ExecutePreparedModel(const sp<IPreparedModel>& preparedModel,
+ const Request& request, MeasureTiming measure,
+ hidl_vec<OutputShape>* outputShapes,
+ Timing* timing) {
+ ErrorStatus result;
+ Return<void> ret = preparedModel->executeSynchronously(
+ request, measure,
+ [&result, outputShapes, timing](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
+ const Timing& time) {
+ result = error;
+ *outputShapes = shapes;
+ *timing = time;
+ });
+ if (!ret.isOk()) {
+ return ErrorStatus::GENERAL_FAILURE;
+ }
+ return result;
+}
+static std::shared_ptr<::android::nn::ExecutionBurstController> CreateBurst(
+ const sp<IPreparedModel>& preparedModel) {
+ return android::nn::ExecutionBurstController::create(preparedModel,
+ std::chrono::microseconds{0});
+}
+enum class Executor { ASYNC, SYNC, BURST };
+
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
+ Executor executor, MeasureTiming measure, OutputType outputType) {
+ // If output0 is not larger than one byte, we cannot test with an insufficient buffer.
+ if (outputType == OutputType::INSUFFICIENT && !isOutputSizeGreaterThanOne(testModel, 0)) {
+ return;
+ }
+
+ Request request = createRequest(testModel);
+ if (outputType == OutputType::INSUFFICIENT) {
+ makeOutputInsufficientSize(/*outputIndex=*/0, &request);
+ }
+
+ ErrorStatus executionStatus;
+ hidl_vec<OutputShape> outputShapes;
+ Timing timing;
+ switch (executor) {
+ case Executor::ASYNC: {
+ SCOPED_TRACE("asynchronous");
+
+ // launch execution
+ sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+ Return<ErrorStatus> executionLaunchStatus =
+ ExecutePreparedModel(preparedModel, request, measure, executionCallback);
+ ASSERT_TRUE(executionLaunchStatus.isOk());
+ EXPECT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(executionLaunchStatus));
+
+ // retrieve execution status
+ executionCallback->wait();
+ executionStatus = executionCallback->getStatus();
+ outputShapes = executionCallback->getOutputShapes();
+ timing = executionCallback->getTiming();
+
+ break;
+ }
+ case Executor::SYNC: {
+ SCOPED_TRACE("synchronous");
+
+ // execute
+ Return<ErrorStatus> executionReturnStatus =
+ ExecutePreparedModel(preparedModel, request, measure, &outputShapes, &timing);
+ ASSERT_TRUE(executionReturnStatus.isOk());
+ executionStatus = static_cast<ErrorStatus>(executionReturnStatus);
+
+ break;
+ }
+ case Executor::BURST: {
+ SCOPED_TRACE("burst");
+
+ // create burst
+ const std::shared_ptr<::android::nn::ExecutionBurstController> controller =
+ CreateBurst(preparedModel);
+ ASSERT_NE(nullptr, controller.get());
+
+ // create memory keys
+ std::vector<intptr_t> keys(request.pools.size());
+ for (size_t i = 0; i < keys.size(); ++i) {
+ keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+ }
+
+ // execute burst
+ int n;
+ std::tie(n, outputShapes, timing, std::ignore) =
+ controller->compute(request, measure, keys);
+ executionStatus = nn::convertResultCodeToErrorStatus(n);
+
+ break;
+ }
+ }
+
+ if (outputType != OutputType::FULLY_SPECIFIED &&
+ executionStatus == ErrorStatus::GENERAL_FAILURE) {
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot "
+ "execute model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "execute model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ if (measure == MeasureTiming::NO) {
+ EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
+ EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
+ } else {
+ if (timing.timeOnDevice != UINT64_MAX && timing.timeInDriver != UINT64_MAX) {
+ EXPECT_LE(timing.timeOnDevice, timing.timeInDriver);
+ }
+ }
+
+ switch (outputType) {
+ case OutputType::FULLY_SPECIFIED:
+ // If the model output operands are fully specified, outputShapes must be
+ // either empty or have the same number of elements as the number of outputs.
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ ASSERT_TRUE(outputShapes.size() == 0 ||
+ outputShapes.size() == testModel.outputIndexes.size());
+ break;
+ case OutputType::UNSPECIFIED:
+ // If the model output operands are not fully specified, outputShapes must have
+ // the same number of elements as the number of outputs.
+ ASSERT_EQ(ErrorStatus::NONE, executionStatus);
+ ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
+ break;
+ case OutputType::INSUFFICIENT:
+ ASSERT_EQ(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, executionStatus);
+ ASSERT_EQ(outputShapes.size(), testModel.outputIndexes.size());
+ ASSERT_FALSE(outputShapes[0].isSufficient);
+ return;
+ }
+
+ // Go through all outputs, check returned output shapes.
+ for (uint32_t i = 0; i < outputShapes.size(); i++) {
+ EXPECT_TRUE(outputShapes[i].isSufficient);
+ const auto& expect = testModel.operands[testModel.outputIndexes[i]].dimensions;
+ const std::vector<uint32_t> actual = outputShapes[i].dimensions;
+ EXPECT_EQ(expect, actual);
+ }
+
+ // Retrieve execution results.
+ const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+
+ // We want "close-enough" results.
+ checkResults(testModel, outputs);
+}
+
+void EvaluatePreparedModel(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
+ bool testDynamicOutputShape) {
+ if (testDynamicOutputShape) {
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
+ OutputType::UNSPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
+ OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
+ OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
+ OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
+ OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
+ OutputType::INSUFFICIENT);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
+ OutputType::INSUFFICIENT);
+ } else {
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::NO,
+ OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::NO,
+ OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::NO,
+ OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::ASYNC, MeasureTiming::YES,
+ OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::SYNC, MeasureTiming::YES,
+ OutputType::FULLY_SPECIFIED);
+ EvaluatePreparedModel(preparedModel, testModel, Executor::BURST, MeasureTiming::YES,
+ OutputType::FULLY_SPECIFIED);
+ }
+}
+
+void Execute(const sp<IDevice>& device, const TestModel& testModel, bool testDynamicOutputShape) {
+ Model model = createModel(testModel);
+ if (testDynamicOutputShape) {
+ makeOutputDimensionsUnspecified(&model);
+ }
+
+ sp<IPreparedModel> preparedModel;
+ createPreparedModel(device, model, &preparedModel);
+ if (preparedModel == nullptr) return;
+
+ EvaluatePreparedModel(preparedModel, testModel, testDynamicOutputShape);
+}
+
+void GeneratedTestBase::SetUp() {
+ testing::TestWithParam<GeneratedTestParam>::SetUp();
+ ASSERT_NE(kDevice, nullptr);
+}
+
+std::vector<NamedModel> getNamedModels(const FilterFn& filter) {
+ return TestModelManager::get().getTestModels(filter);
+}
+
+std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
+ const auto& [namedDevice, namedModel] = info.param;
+ return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
+}
+
+// Tag for the generated tests
+class GeneratedTest : public GeneratedTestBase {};
+
+// Tag for the dynamic output shape tests
+class DynamicOutputShapeTest : public GeneratedTest {};
+
+TEST_P(GeneratedTest, Test) {
+ Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/false);
+}
+
+TEST_P(DynamicOutputShapeTest, Test) {
+ Execute(kDevice, kTestModel, /*testDynamicOutputShape=*/true);
+}
+
+INSTANTIATE_GENERATED_TEST(GeneratedTest,
+ [](const TestModel& testModel) { return !testModel.expectFailure; });
+
+INSTANTIATE_GENERATED_TEST(DynamicOutputShapeTest,
+ [](const TestModel& testModel) { return !testModel.expectFailure; });
+
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
new file mode 100644
index 0000000..b9277cf
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
+
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <functional>
+#include <vector>
+#include "1.0/Utils.h"
+#include "TestHarness.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using NamedModel = Named<const test_helper::TestModel*>;
+using GeneratedTestParam = std::tuple<NamedDevice, NamedModel>;
+
+class GeneratedTestBase : public testing::TestWithParam<GeneratedTestParam> {
+ protected:
+ void SetUp() override;
+ const sp<IDevice> kDevice = getData(std::get<NamedDevice>(GetParam()));
+ const test_helper::TestModel& kTestModel = *getData(std::get<NamedModel>(GetParam()));
+};
+
+using FilterFn = std::function<bool(const test_helper::TestModel&)>;
+std::vector<NamedModel> getNamedModels(const FilterFn& filter);
+
+std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info);
+
+#define INSTANTIATE_GENERATED_TEST(TestSuite, filter) \
+ INSTANTIATE_TEST_SUITE_P(TestGenerated, TestSuite, \
+ testing::Combine(testing::ValuesIn(getNamedDevices()), \
+ testing::ValuesIn(getNamedModels(filter))), \
+ printGeneratedTest)
+
+// Tag for the validation tests, instantiated in VtsHalNeuralnetworks.cpp.
+// TODO: Clean up the hierarchy for ValidationTest.
+class ValidationTest : public GeneratedTestBase {};
+
+Model createModel(const test_helper::TestModel& testModel);
+
+void PrepareModel(const sp<IDevice>& device, const Model& model,
+ sp<V1_2::IPreparedModel>* preparedModel);
+
+void EvaluatePreparedModel(const sp<V1_2::IPreparedModel>& preparedModel,
+ const test_helper::TestModel& testModel, bool testDynamicOutputShape);
+
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
+
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.3/vts/functional/TestAssertions.cpp b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
new file mode 100644
index 0000000..7361078
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/TestAssertions.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include "TestHarness.h"
+
+namespace android::hardware::neuralnetworks::V1_3 {
+
+// Make sure that the HIDL enums are compatible with the values defined in
+// frameworks/ml/nn/tools/test_generator/test_harness/include/TestHarness.h.
+using namespace test_helper;
+#define CHECK_TEST_ENUM(EnumType, enumValue) \
+ static_assert(static_cast<EnumType>(Test##EnumType::enumValue) == EnumType::enumValue)
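+// For example, CHECK_TEST_ENUM(OperandType, FLOAT32) expands to
+//   static_assert(static_cast<OperandType>(TestOperandType::FLOAT32) == OperandType::FLOAT32);
+// so any divergence between the HIDL enum and the test_helper enum fails at compile time.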
+
+using V1_2::OperationType;
+
+CHECK_TEST_ENUM(OperandType, FLOAT32);
+CHECK_TEST_ENUM(OperandType, INT32);
+CHECK_TEST_ENUM(OperandType, UINT32);
+CHECK_TEST_ENUM(OperandType, TENSOR_FLOAT32);
+CHECK_TEST_ENUM(OperandType, TENSOR_INT32);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_ASYMM);
+CHECK_TEST_ENUM(OperandType, BOOL);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT16_SYMM);
+CHECK_TEST_ENUM(OperandType, TENSOR_FLOAT16);
+CHECK_TEST_ENUM(OperandType, TENSOR_BOOL8);
+CHECK_TEST_ENUM(OperandType, FLOAT16);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_SYMM_PER_CHANNEL);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT16_ASYMM);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_SYMM);
+CHECK_TEST_ENUM(OperandType, TENSOR_QUANT8_ASYMM_SIGNED);
+
+CHECK_TEST_ENUM(OperationType, ADD);
+CHECK_TEST_ENUM(OperationType, AVERAGE_POOL_2D);
+CHECK_TEST_ENUM(OperationType, CONCATENATION);
+CHECK_TEST_ENUM(OperationType, CONV_2D);
+CHECK_TEST_ENUM(OperationType, DEPTHWISE_CONV_2D);
+CHECK_TEST_ENUM(OperationType, DEPTH_TO_SPACE);
+CHECK_TEST_ENUM(OperationType, DEQUANTIZE);
+CHECK_TEST_ENUM(OperationType, EMBEDDING_LOOKUP);
+CHECK_TEST_ENUM(OperationType, FLOOR);
+CHECK_TEST_ENUM(OperationType, FULLY_CONNECTED);
+CHECK_TEST_ENUM(OperationType, HASHTABLE_LOOKUP);
+CHECK_TEST_ENUM(OperationType, L2_NORMALIZATION);
+CHECK_TEST_ENUM(OperationType, L2_POOL_2D);
+CHECK_TEST_ENUM(OperationType, LOCAL_RESPONSE_NORMALIZATION);
+CHECK_TEST_ENUM(OperationType, LOGISTIC);
+CHECK_TEST_ENUM(OperationType, LSH_PROJECTION);
+CHECK_TEST_ENUM(OperationType, LSTM);
+CHECK_TEST_ENUM(OperationType, MAX_POOL_2D);
+CHECK_TEST_ENUM(OperationType, MUL);
+CHECK_TEST_ENUM(OperationType, RELU);
+CHECK_TEST_ENUM(OperationType, RELU1);
+CHECK_TEST_ENUM(OperationType, RELU6);
+CHECK_TEST_ENUM(OperationType, RESHAPE);
+CHECK_TEST_ENUM(OperationType, RESIZE_BILINEAR);
+CHECK_TEST_ENUM(OperationType, RNN);
+CHECK_TEST_ENUM(OperationType, SOFTMAX);
+CHECK_TEST_ENUM(OperationType, SPACE_TO_DEPTH);
+CHECK_TEST_ENUM(OperationType, SVDF);
+CHECK_TEST_ENUM(OperationType, TANH);
+CHECK_TEST_ENUM(OperationType, BATCH_TO_SPACE_ND);
+CHECK_TEST_ENUM(OperationType, DIV);
+CHECK_TEST_ENUM(OperationType, MEAN);
+CHECK_TEST_ENUM(OperationType, PAD);
+CHECK_TEST_ENUM(OperationType, SPACE_TO_BATCH_ND);
+CHECK_TEST_ENUM(OperationType, SQUEEZE);
+CHECK_TEST_ENUM(OperationType, STRIDED_SLICE);
+CHECK_TEST_ENUM(OperationType, SUB);
+CHECK_TEST_ENUM(OperationType, TRANSPOSE);
+CHECK_TEST_ENUM(OperationType, ABS);
+CHECK_TEST_ENUM(OperationType, ARGMAX);
+CHECK_TEST_ENUM(OperationType, ARGMIN);
+CHECK_TEST_ENUM(OperationType, AXIS_ALIGNED_BBOX_TRANSFORM);
+CHECK_TEST_ENUM(OperationType, BIDIRECTIONAL_SEQUENCE_LSTM);
+CHECK_TEST_ENUM(OperationType, BIDIRECTIONAL_SEQUENCE_RNN);
+CHECK_TEST_ENUM(OperationType, BOX_WITH_NMS_LIMIT);
+CHECK_TEST_ENUM(OperationType, CAST);
+CHECK_TEST_ENUM(OperationType, CHANNEL_SHUFFLE);
+CHECK_TEST_ENUM(OperationType, DETECTION_POSTPROCESSING);
+CHECK_TEST_ENUM(OperationType, EQUAL);
+CHECK_TEST_ENUM(OperationType, EXP);
+CHECK_TEST_ENUM(OperationType, EXPAND_DIMS);
+CHECK_TEST_ENUM(OperationType, GATHER);
+CHECK_TEST_ENUM(OperationType, GENERATE_PROPOSALS);
+CHECK_TEST_ENUM(OperationType, GREATER);
+CHECK_TEST_ENUM(OperationType, GREATER_EQUAL);
+CHECK_TEST_ENUM(OperationType, GROUPED_CONV_2D);
+CHECK_TEST_ENUM(OperationType, HEATMAP_MAX_KEYPOINT);
+CHECK_TEST_ENUM(OperationType, INSTANCE_NORMALIZATION);
+CHECK_TEST_ENUM(OperationType, LESS);
+CHECK_TEST_ENUM(OperationType, LESS_EQUAL);
+CHECK_TEST_ENUM(OperationType, LOG);
+CHECK_TEST_ENUM(OperationType, LOGICAL_AND);
+CHECK_TEST_ENUM(OperationType, LOGICAL_NOT);
+CHECK_TEST_ENUM(OperationType, LOGICAL_OR);
+CHECK_TEST_ENUM(OperationType, LOG_SOFTMAX);
+CHECK_TEST_ENUM(OperationType, MAXIMUM);
+CHECK_TEST_ENUM(OperationType, MINIMUM);
+CHECK_TEST_ENUM(OperationType, NEG);
+CHECK_TEST_ENUM(OperationType, NOT_EQUAL);
+CHECK_TEST_ENUM(OperationType, PAD_V2);
+CHECK_TEST_ENUM(OperationType, POW);
+CHECK_TEST_ENUM(OperationType, PRELU);
+CHECK_TEST_ENUM(OperationType, QUANTIZE);
+CHECK_TEST_ENUM(OperationType, QUANTIZED_16BIT_LSTM);
+CHECK_TEST_ENUM(OperationType, RANDOM_MULTINOMIAL);
+CHECK_TEST_ENUM(OperationType, REDUCE_ALL);
+CHECK_TEST_ENUM(OperationType, REDUCE_ANY);
+CHECK_TEST_ENUM(OperationType, REDUCE_MAX);
+CHECK_TEST_ENUM(OperationType, REDUCE_MIN);
+CHECK_TEST_ENUM(OperationType, REDUCE_PROD);
+CHECK_TEST_ENUM(OperationType, REDUCE_SUM);
+CHECK_TEST_ENUM(OperationType, ROI_ALIGN);
+CHECK_TEST_ENUM(OperationType, ROI_POOLING);
+CHECK_TEST_ENUM(OperationType, RSQRT);
+CHECK_TEST_ENUM(OperationType, SELECT);
+CHECK_TEST_ENUM(OperationType, SIN);
+CHECK_TEST_ENUM(OperationType, SLICE);
+CHECK_TEST_ENUM(OperationType, SPLIT);
+CHECK_TEST_ENUM(OperationType, SQRT);
+CHECK_TEST_ENUM(OperationType, TILE);
+CHECK_TEST_ENUM(OperationType, TOPK_V2);
+CHECK_TEST_ENUM(OperationType, TRANSPOSE_CONV_2D);
+CHECK_TEST_ENUM(OperationType, UNIDIRECTIONAL_SEQUENCE_LSTM);
+CHECK_TEST_ENUM(OperationType, UNIDIRECTIONAL_SEQUENCE_RNN);
+CHECK_TEST_ENUM(OperationType, RESIZE_NEAREST_NEIGHBOR);
+
+#undef CHECK_TEST_ENUM
+
+} // namespace android::hardware::neuralnetworks::V1_3
diff --git a/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
new file mode 100644
index 0000000..2c97294
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/ValidateBurst.cpp
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+
+#include "1.2/Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "ExecutionBurstServer.h"
+#include "GeneratedTestHarness.h"
+#include "TestHarness.h"
+#include "Utils.h"
+
+#include <android-base/logging.h>
+#include <chrono>
+#include <cstring>
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using nn::ExecutionBurstController;
+using nn::RequestChannelSender;
+using nn::ResultChannelReceiver;
+using V1_0::ErrorStatus;
+using V1_0::Request;
+using V1_2::FmqRequestDatum;
+using V1_2::FmqResultDatum;
+using V1_2::IBurstCallback;
+using V1_2::IBurstContext;
+using V1_2::IPreparedModel;
+using V1_2::MeasureTiming;
+using V1_2::Timing;
+using ExecutionBurstCallback = ExecutionBurstController::ExecutionBurstCallback;
+
+// This constant value represents the length of an FMQ that is large enough to
+// return a result from a burst execution for all of the generated test cases.
+constexpr size_t kExecutionBurstChannelLength = 1024;
+
+// This constant value represents a length of an FMQ that is not large enough
+// to return a result from a burst execution for some of the generated test
+// cases.
+constexpr size_t kExecutionBurstChannelSmallLength = 8;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static bool badTiming(Timing timing) {
+ return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
+}
+
+static void createBurst(const sp<IPreparedModel>& preparedModel, const sp<IBurstCallback>& callback,
+ std::unique_ptr<RequestChannelSender>* sender,
+ std::unique_ptr<ResultChannelReceiver>* receiver,
+ sp<IBurstContext>* context,
+ size_t resultChannelLength = kExecutionBurstChannelLength) {
+ ASSERT_NE(nullptr, preparedModel.get());
+ ASSERT_NE(nullptr, sender);
+ ASSERT_NE(nullptr, receiver);
+ ASSERT_NE(nullptr, context);
+
+ // create FMQ objects
+ auto [fmqRequestChannel, fmqRequestDescriptor] =
+ RequestChannelSender::create(kExecutionBurstChannelLength);
+ auto [fmqResultChannel, fmqResultDescriptor] =
+ ResultChannelReceiver::create(resultChannelLength, std::chrono::microseconds{0});
+ ASSERT_NE(nullptr, fmqRequestChannel.get());
+ ASSERT_NE(nullptr, fmqResultChannel.get());
+ ASSERT_NE(nullptr, fmqRequestDescriptor);
+ ASSERT_NE(nullptr, fmqResultDescriptor);
+
+ // configure burst
+ ErrorStatus errorStatus;
+ sp<IBurstContext> burstContext;
+ const Return<void> ret = preparedModel->configureExecutionBurst(
+ callback, *fmqRequestDescriptor, *fmqResultDescriptor,
+ [&errorStatus, &burstContext](ErrorStatus status, const sp<IBurstContext>& context) {
+ errorStatus = status;
+ burstContext = context;
+ });
+ ASSERT_TRUE(ret.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, errorStatus);
+ ASSERT_NE(nullptr, burstContext.get());
+
+ // return values
+ *sender = std::move(fmqRequestChannel);
+ *receiver = std::move(fmqResultChannel);
+ *context = burstContext;
+}
+
+static void createBurstWithResultChannelLength(
+ const sp<IPreparedModel>& preparedModel, size_t resultChannelLength,
+ std::shared_ptr<ExecutionBurstController>* controller) {
+ ASSERT_NE(nullptr, preparedModel.get());
+ ASSERT_NE(nullptr, controller);
+
+ // create FMQ objects
+ std::unique_ptr<RequestChannelSender> sender;
+ std::unique_ptr<ResultChannelReceiver> receiver;
+ sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
+ sp<IBurstContext> context;
+ ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context,
+ resultChannelLength));
+ ASSERT_NE(nullptr, sender.get());
+ ASSERT_NE(nullptr, receiver.get());
+ ASSERT_NE(nullptr, context.get());
+
+ // return values
+ *controller = std::make_shared<ExecutionBurstController>(std::move(sender), std::move(receiver),
+ context, callback);
+}
+
+// Primary validation function. This function will take a valid serialized
+// request, apply a mutation to it to invalidate the serialized request, then
+// pass it to interface calls that use the serialized request. Note that the
+// serialized request here is passed by value, and any mutation to the
+// serialized request does not leave this function.
+static void validate(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+ const std::string& message, std::vector<FmqRequestDatum> serialized,
+ const std::function<void(std::vector<FmqRequestDatum>*)>& mutation) {
+ mutation(&serialized);
+
+ // skip if packet is too large to send
+ if (serialized.size() > kExecutionBurstChannelLength) {
+ return;
+ }
+
+ SCOPED_TRACE(message);
+
+ // send invalid packet
+ ASSERT_TRUE(sender->sendPacket(serialized));
+
+ // receive error
+ auto results = receiver->getBlocking();
+ ASSERT_TRUE(results.has_value());
+ const auto [status, outputShapes, timing] = std::move(*results);
+ EXPECT_NE(ErrorStatus::NONE, status);
+ EXPECT_EQ(0u, outputShapes.size());
+ EXPECT_TRUE(badTiming(timing));
+}
+
+// For validation, valid packet entries are mutated to invalid packet entries,
+// or invalid packet entries are inserted into valid packets. This function
+// creates pre-set invalid packet entries for convenience.
+static std::vector<FmqRequestDatum> createBadRequestPacketEntries() {
+ const FmqRequestDatum::PacketInformation packetInformation = {
+ /*.packetSize=*/10, /*.numberOfInputOperands=*/10, /*.numberOfOutputOperands=*/10,
+ /*.numberOfPools=*/10};
+ const FmqRequestDatum::OperandInformation operandInformation = {
+ /*.hasNoValue=*/false, /*.location=*/{}, /*.numberOfDimensions=*/10};
+ const int32_t invalidPoolIdentifier = std::numeric_limits<int32_t>::max();
+ std::vector<FmqRequestDatum> bad(7);
+ bad[0].packetInformation(packetInformation);
+ bad[1].inputOperandInformation(operandInformation);
+ bad[2].inputOperandDimensionValue(0);
+ bad[3].outputOperandInformation(operandInformation);
+ bad[4].outputOperandDimensionValue(0);
+ bad[5].poolIdentifier(invalidPoolIdentifier);
+ bad[6].measureTiming(MeasureTiming::YES);
+ return bad;
+}
+
+// For validation, valid packet entries are mutated to invalid packet entries,
+// or invalid packet entries are inserted into valid packets. This function
+// retrieves pre-set invalid packet entries for convenience. This function
+// caches the entries so they can be reused on subsequent validation checks.
+static const std::vector<FmqRequestDatum>& getBadRequestPacketEntries() {
+ static const std::vector<FmqRequestDatum> bad = createBadRequestPacketEntries();
+ return bad;
+}
+
+///////////////////////// REMOVE DATUM ////////////////////////////////////
+
+static void removeDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+ const std::vector<FmqRequestDatum>& serialized) {
+ for (size_t index = 0; index < serialized.size(); ++index) {
+ const std::string message = "removeDatum: removed datum at index " + std::to_string(index);
+ validate(sender, receiver, message, serialized,
+ [index](std::vector<FmqRequestDatum>* serialized) {
+ serialized->erase(serialized->begin() + index);
+ });
+ }
+}
+
+///////////////////////// ADD DATUM ////////////////////////////////////
+
+static void addDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+ const std::vector<FmqRequestDatum>& serialized) {
+ const std::vector<FmqRequestDatum>& extra = getBadRequestPacketEntries();
+ for (size_t index = 0; index <= serialized.size(); ++index) {
+ for (size_t type = 0; type < extra.size(); ++type) {
+ const std::string message = "addDatum: added datum type " + std::to_string(type) +
+ " at index " + std::to_string(index);
+ validate(sender, receiver, message, serialized,
+ [index, type, &extra](std::vector<FmqRequestDatum>* serialized) {
+ serialized->insert(serialized->begin() + index, extra[type]);
+ });
+ }
+ }
+}
+
+///////////////////////// MUTATE DATUM ////////////////////////////////////
+
+static bool interestingCase(const FmqRequestDatum& lhs, const FmqRequestDatum& rhs) {
+ using Discriminator = FmqRequestDatum::hidl_discriminator;
+
+ const bool differentValues = (lhs != rhs);
+ const bool sameDiscriminator = (lhs.getDiscriminator() == rhs.getDiscriminator());
+ const auto discriminator = rhs.getDiscriminator();
+ const bool isDimensionValue = (discriminator == Discriminator::inputOperandDimensionValue ||
+ discriminator == Discriminator::outputOperandDimensionValue);
+
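+ // Swapping one dimension value for another dimension value is skipped here, presumably
+ // because the mutated packet can still be well-formed and the service is not required
+ // to reject it.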
+ return differentValues && !(sameDiscriminator && isDimensionValue);
+}
+
+static void mutateDatumTest(RequestChannelSender* sender, ResultChannelReceiver* receiver,
+ const std::vector<FmqRequestDatum>& serialized) {
+ const std::vector<FmqRequestDatum>& change = getBadRequestPacketEntries();
+ for (size_t index = 0; index < serialized.size(); ++index) {
+ for (size_t type = 0; type < change.size(); ++type) {
+ if (interestingCase(serialized[index], change[type])) {
+ const std::string message = "mutateDatum: changed datum at index " +
+ std::to_string(index) + " to datum type " +
+ std::to_string(type);
+ validate(sender, receiver, message, serialized,
+ [index, type, &change](std::vector<FmqRequestDatum>* serialized) {
+ (*serialized)[index] = change[type];
+ });
+ }
+ }
+ }
+}
+
+///////////////////////// BURST VALIDATION TESTS ////////////////////////////////////
+
+static void validateBurstSerialization(const sp<IPreparedModel>& preparedModel,
+ const Request& request) {
+ // create burst
+ std::unique_ptr<RequestChannelSender> sender;
+ std::unique_ptr<ResultChannelReceiver> receiver;
+ sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
+ sp<IBurstContext> context;
+ ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context));
+ ASSERT_NE(nullptr, sender.get());
+ ASSERT_NE(nullptr, receiver.get());
+ ASSERT_NE(nullptr, context.get());
+
+ // load memory into callback slots
+ std::vector<intptr_t> keys;
+ keys.reserve(request.pools.size());
+ std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+ [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
+ const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
+
+ // ensure slot std::numeric_limits<int32_t>::max() doesn't exist (for
+ // subsequent slot validation testing)
+ ASSERT_TRUE(std::all_of(slots.begin(), slots.end(), [](int32_t slot) {
+ return slot != std::numeric_limits<int32_t>::max();
+ }));
+
+ // serialize the request
+ const auto serialized = android::nn::serialize(request, MeasureTiming::YES, slots);
+
+ // validations
+ removeDatumTest(sender.get(), receiver.get(), serialized);
+ addDatumTest(sender.get(), receiver.get(), serialized);
+ mutateDatumTest(sender.get(), receiver.get(), serialized);
+}
+
+// This test validates that when the Result message size exceeds the length of the
+// result FMQ, the service instance gracefully fails and returns an error.
+static void validateBurstFmqLength(const sp<IPreparedModel>& preparedModel,
+ const Request& request) {
+ // create regular burst
+ std::shared_ptr<ExecutionBurstController> controllerRegular;
+ ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
+ preparedModel, kExecutionBurstChannelLength, &controllerRegular));
+ ASSERT_NE(nullptr, controllerRegular.get());
+
+ // create burst with small output channel
+ std::shared_ptr<ExecutionBurstController> controllerSmall;
+ ASSERT_NO_FATAL_FAILURE(createBurstWithResultChannelLength(
+ preparedModel, kExecutionBurstChannelSmallLength, &controllerSmall));
+ ASSERT_NE(nullptr, controllerSmall.get());
+
+ // load memory into callback slots
+ std::vector<intptr_t> keys(request.pools.size());
+ for (size_t i = 0; i < keys.size(); ++i) {
+ keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+ }
+
+ // collect serialized result by running regular burst
+ const auto [nRegular, outputShapesRegular, timingRegular, fallbackRegular] =
+ controllerRegular->compute(request, MeasureTiming::NO, keys);
+ const ErrorStatus statusRegular = nn::convertResultCodeToErrorStatus(nRegular);
+ EXPECT_FALSE(fallbackRegular);
+
+ // skip the test if the regular burst output is not useful for testing a failure
+ // caused by a result FMQ that is too short
+ const std::vector<FmqResultDatum> serialized =
+ android::nn::serialize(statusRegular, outputShapesRegular, timingRegular);
+ if (statusRegular != ErrorStatus::NONE ||
+ serialized.size() <= kExecutionBurstChannelSmallLength) {
+ return;
+ }
+
+ // by this point, execution should fail because the result channel isn't
+ // large enough to return the serialized result
+ const auto [nSmall, outputShapesSmall, timingSmall, fallbackSmall] =
+ controllerSmall->compute(request, MeasureTiming::NO, keys);
+ const ErrorStatus statusSmall = nn::convertResultCodeToErrorStatus(nSmall);
+ EXPECT_NE(ErrorStatus::NONE, statusSmall);
+ EXPECT_EQ(0u, outputShapesSmall.size());
+ EXPECT_TRUE(badTiming(timingSmall));
+ EXPECT_FALSE(fallbackSmall);
+}
+
+static bool isSanitized(const FmqResultDatum& datum) {
+ using Discriminator = FmqResultDatum::hidl_discriminator;
+
+ // check to ensure the padding values in the returned
+ // FmqResultDatum::OperandInformation are initialized to 0
+ if (datum.getDiscriminator() == Discriminator::operandInformation) {
+ static_assert(
+ offsetof(FmqResultDatum::OperandInformation, isSufficient) == 0,
+ "unexpected value for offset of FmqResultDatum::OperandInformation::isSufficient");
+ static_assert(
+ sizeof(FmqResultDatum::OperandInformation::isSufficient) == 1,
+ "unexpected value for size of FmqResultDatum::OperandInformation::isSufficient");
+ static_assert(offsetof(FmqResultDatum::OperandInformation, numberOfDimensions) == 4,
+ "unexpected value for offset of "
+ "FmqResultDatum::OperandInformation::numberOfDimensions");
+ static_assert(sizeof(FmqResultDatum::OperandInformation::numberOfDimensions) == 4,
+ "unexpected value for size of "
+ "FmqResultDatum::OperandInformation::numberOfDimensions");
+ static_assert(sizeof(FmqResultDatum::OperandInformation) == 8,
+ "unexpected value for size of "
+ "FmqResultDatum::OperandInformation");
+
+ constexpr size_t paddingOffset =
+ offsetof(FmqResultDatum::OperandInformation, isSufficient) +
+ sizeof(FmqResultDatum::OperandInformation::isSufficient);
+ constexpr size_t paddingSize =
+ offsetof(FmqResultDatum::OperandInformation, numberOfDimensions) - paddingOffset;
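+ // Given the offsets asserted above: byte 0 holds isSufficient, bytes 1-3 are implicit
+ // struct padding, and bytes 4-7 hold numberOfDimensions, so paddingOffset is 1 and
+ // paddingSize is 3.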
+
+ FmqResultDatum::OperandInformation initialized{};
+ std::memset(&initialized, 0, sizeof(initialized));
+
+ const char* initializedPaddingStart =
+ reinterpret_cast<const char*>(&initialized) + paddingOffset;
+ const char* datumPaddingStart =
+ reinterpret_cast<const char*>(&datum.operandInformation()) + paddingOffset;
+
+ return std::memcmp(datumPaddingStart, initializedPaddingStart, paddingSize) == 0;
+ }
+
+ // there are no other padding initialization checks required, so return true
+ // for any sum-type that isn't FmqResultDatum::OperandInformation
+ return true;
+}
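
Editor's note: the padding check above generalizes to any standard-layout struct; below is a minimal, self-contained sketch of the same offsetof/memcmp bookkeeping on a hypothetical struct (struct and function names are illustrative, not part of this change).

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    struct Example {
        bool flag;       // 1 byte; typically followed by 3 padding bytes
        uint32_t count;  // 4-byte aligned on common ABIs
    };

    // Returns true if the padding bytes between 'flag' and 'count' are zero.
    bool paddingIsZero(const Example& e) {
        constexpr std::size_t paddingOffset = offsetof(Example, flag) + sizeof(Example::flag);
        constexpr std::size_t paddingSize = offsetof(Example, count) - paddingOffset;
        Example zeroed;
        std::memset(&zeroed, 0, sizeof(zeroed));  // guarantee the reference padding is zero
        return std::memcmp(reinterpret_cast<const char*>(&e) + paddingOffset,
                           reinterpret_cast<const char*>(&zeroed) + paddingOffset,
                           paddingSize) == 0;
    }
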
+
+static void validateBurstSanitized(const sp<IPreparedModel>& preparedModel,
+ const Request& request) {
+ // create burst
+ std::unique_ptr<RequestChannelSender> sender;
+ std::unique_ptr<ResultChannelReceiver> receiver;
+ sp<ExecutionBurstCallback> callback = new ExecutionBurstCallback();
+ sp<IBurstContext> context;
+ ASSERT_NO_FATAL_FAILURE(createBurst(preparedModel, callback, &sender, &receiver, &context));
+ ASSERT_NE(nullptr, sender.get());
+ ASSERT_NE(nullptr, receiver.get());
+ ASSERT_NE(nullptr, context.get());
+
+ // load memory into callback slots
+ std::vector<intptr_t> keys;
+ keys.reserve(request.pools.size());
+ std::transform(request.pools.begin(), request.pools.end(), std::back_inserter(keys),
+ [](const auto& pool) { return reinterpret_cast<intptr_t>(&pool); });
+ const std::vector<int32_t> slots = callback->getSlots(request.pools, keys);
+
+ // send valid request
+ ASSERT_TRUE(sender->send(request, MeasureTiming::YES, slots));
+
+ // receive valid result
+ auto serialized = receiver->getPacketBlocking();
+ ASSERT_TRUE(serialized.has_value());
+
+ // sanitize result
+ ASSERT_TRUE(std::all_of(serialized->begin(), serialized->end(), isSanitized))
+ << "The result serialized data is not properly sanitized";
+}
+
+///////////////////////////// ENTRY POINT //////////////////////////////////
+
+void validateBurst(const sp<IPreparedModel>& preparedModel, const Request& request) {
+ ASSERT_NO_FATAL_FAILURE(validateBurstSerialization(preparedModel, request));
+ ASSERT_NO_FATAL_FAILURE(validateBurstFmqLength(preparedModel, request));
+ ASSERT_NO_FATAL_FAILURE(validateBurstSanitized(preparedModel, request));
+}
+
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/ValidateModel.cpp b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
new file mode 100644
index 0000000..44b32a9
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/ValidateModel.cpp
@@ -0,0 +1,718 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+#include "GeneratedTestHarness.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using V1_0::ErrorStatus;
+using V1_0::OperandLifeTime;
+using V1_1::ExecutionPreference;
+using V1_2::IPreparedModel;
+using V1_2::OperationType;
+using V1_2::OperationTypeRange;
+using V1_2::SymmPerChannelQuantParams;
+using V1_2::implementation::PreparedModelCallback;
+using HidlToken =
+ hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static void validateGetSupportedOperations(const sp<IDevice>& device, const std::string& message,
+ const Model& model) {
+ SCOPED_TRACE(message + " [getSupportedOperations_1_3]");
+
+ Return<void> ret = device->getSupportedOperations_1_3(
+ model, [&](ErrorStatus status, const hidl_vec<bool>&) {
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+ });
+ EXPECT_TRUE(ret.isOk());
+}
+
+static void validatePrepareModel(const sp<IDevice>& device, const std::string& message,
+ const Model& model, ExecutionPreference preference) {
+ SCOPED_TRACE(message + " [prepareModel_1_3]");
+
+ sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ Return<ErrorStatus> prepareLaunchStatus =
+ device->prepareModel_1_3(model, preference, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+ preparedModelCallback->wait();
+ ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, prepareReturnStatus);
+ sp<IPreparedModel> preparedModel = getPreparedModel_1_2(preparedModelCallback);
+ ASSERT_EQ(nullptr, preparedModel.get());
+}
+
+static bool validExecutionPreference(ExecutionPreference preference) {
+ return preference == ExecutionPreference::LOW_POWER ||
+ preference == ExecutionPreference::FAST_SINGLE_ANSWER ||
+ preference == ExecutionPreference::SUSTAINED_SPEED;
+}
+
+// Primary validation function. This function will take a valid model, apply a
+// mutation to it to invalidate the model, then pass it to interface calls that
+// use the model. Note that the model here is passed by value, and any mutation
+// to the model does not leave this function.
+static void validate(const sp<IDevice>& device, const std::string& message, Model model,
+ const std::function<void(Model*)>& mutation,
+ ExecutionPreference preference = ExecutionPreference::FAST_SINGLE_ANSWER) {
+ mutation(&model);
+ if (validExecutionPreference(preference)) {
+ validateGetSupportedOperations(device, message, model);
+ }
+ validatePrepareModel(device, message, model, preference);
+}
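
Editor's note: each mutate*Test below pairs a descriptive message with a mutation lambda, and because the model is passed by value the mutation never leaks into other cases. An illustrative fragment of that calling pattern, assuming the fixture's device and model (the message string is made up):

    // Illustrative fragment only: give operand 0 an out-of-range type,
    // mirroring the mutate*Test helpers defined below.
    validate(device, "example: operand 0 set to an invalid type", model,
             [](Model* model) {
                 model->operands[0].type = static_cast<OperandType>(
                         static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX) + 1);
             });
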
+
+static uint32_t addOperand(Model* model) {
+ return hidl_vec_push_back(&model->operands,
+ {
+ .type = OperandType::INT32,
+ .dimensions = {},
+ .numberOfConsumers = 0,
+ .scale = 0.0f,
+ .zeroPoint = 0,
+ .lifetime = OperandLifeTime::MODEL_INPUT,
+ .location = {.poolIndex = 0, .offset = 0, .length = 0},
+ });
+}
+
+static uint32_t addOperand(Model* model, OperandLifeTime lifetime) {
+ uint32_t index = addOperand(model);
+ model->operands[index].numberOfConsumers = 1;
+ model->operands[index].lifetime = lifetime;
+ return index;
+}
+
+///////////////////////// VALIDATE MODEL OPERAND TYPE /////////////////////////
+
+static const uint32_t invalidOperandTypes[] = {
+ static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MIN) - 1,
+ static_cast<uint32_t>(OperandTypeRange::FUNDAMENTAL_MAX) + 1,
+ static_cast<uint32_t>(OperandTypeRange::OEM_MIN) - 1,
+ static_cast<uint32_t>(OperandTypeRange::OEM_MAX) + 1,
+};
+
+static void mutateOperandTypeTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ for (uint32_t invalidOperandType : invalidOperandTypes) {
+ const std::string message = "mutateOperandTypeTest: operand " +
+ std::to_string(operand) + " set to value " +
+ std::to_string(invalidOperandType);
+ validate(device, message, model, [operand, invalidOperandType](Model* model) {
+ model->operands[operand].type = static_cast<OperandType>(invalidOperandType);
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE OPERAND RANK /////////////////////////
+
+static uint32_t getInvalidRank(OperandType type) {
+ switch (type) {
+ case OperandType::FLOAT16:
+ case OperandType::FLOAT32:
+ case OperandType::INT32:
+ case OperandType::UINT32:
+ case OperandType::BOOL:
+ return 1;
+ case OperandType::TENSOR_BOOL8:
+ case OperandType::TENSOR_FLOAT16:
+ case OperandType::TENSOR_FLOAT32:
+ case OperandType::TENSOR_INT32:
+ case OperandType::TENSOR_QUANT8_ASYMM:
+ case OperandType::TENSOR_QUANT8_SYMM:
+ case OperandType::TENSOR_QUANT16_ASYMM:
+ case OperandType::TENSOR_QUANT16_SYMM:
+ case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static void mutateOperandRankTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const uint32_t invalidRank = getInvalidRank(model.operands[operand].type);
+ if (invalidRank == 0) {
+ continue;
+ }
+ const std::string message = "mutateOperandRankTest: operand " + std::to_string(operand) +
+ " has rank of " + std::to_string(invalidRank);
+ validate(device, message, model, [operand, invalidRank](Model* model) {
+ model->operands[operand].dimensions = std::vector<uint32_t>(invalidRank, 0);
+ });
+ }
+}
+
+///////////////////////// VALIDATE OPERAND SCALE /////////////////////////
+
+static float getInvalidScale(OperandType type) {
+ switch (type) {
+ case OperandType::FLOAT16:
+ case OperandType::FLOAT32:
+ case OperandType::INT32:
+ case OperandType::UINT32:
+ case OperandType::BOOL:
+ case OperandType::TENSOR_BOOL8:
+ case OperandType::TENSOR_FLOAT16:
+ case OperandType::TENSOR_FLOAT32:
+ case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ return 1.0f;
+ case OperandType::TENSOR_INT32:
+ return -1.0f;
+ case OperandType::TENSOR_QUANT8_SYMM:
+ case OperandType::TENSOR_QUANT8_ASYMM:
+ case OperandType::TENSOR_QUANT16_ASYMM:
+ case OperandType::TENSOR_QUANT16_SYMM:
+ return 0.0f;
+ default:
+ return 0.0f;
+ }
+}
+
+static void mutateOperandScaleTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const float invalidScale = getInvalidScale(model.operands[operand].type);
+ const std::string message = "mutateOperandScaleTest: operand " + std::to_string(operand) +
+ " has scale of " + std::to_string(invalidScale);
+ validate(device, message, model, [operand, invalidScale](Model* model) {
+ model->operands[operand].scale = invalidScale;
+ });
+ }
+}
+
+///////////////////////// VALIDATE OPERAND ZERO POINT /////////////////////////
+
+static std::vector<int32_t> getInvalidZeroPoints(OperandType type) {
+ switch (type) {
+ case OperandType::FLOAT16:
+ case OperandType::FLOAT32:
+ case OperandType::INT32:
+ case OperandType::UINT32:
+ case OperandType::BOOL:
+ case OperandType::TENSOR_BOOL8:
+ case OperandType::TENSOR_FLOAT16:
+ case OperandType::TENSOR_FLOAT32:
+ case OperandType::TENSOR_INT32:
+ case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+ return {1};
+ case OperandType::TENSOR_QUANT8_ASYMM:
+ return {-1, 256};
+ case OperandType::TENSOR_QUANT8_SYMM:
+ return {-129, -1, 1, 128};
+ case OperandType::TENSOR_QUANT16_ASYMM:
+ return {-1, 65536};
+ case OperandType::TENSOR_QUANT16_SYMM:
+ return {-32769, -1, 1, 32768};
+ default:
+ return {};
+ }
+}
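
Editor's note: the invalid zero points above imply the valid ranges for each quantized type; a self-contained sketch of those ranges, for reference only (function names are illustrative).

    #include <cstdint>

    // Asymmetric 8-/16-bit quantized types accept [0, 255] / [0, 65535];
    // symmetric quantized types accept only a zero point of 0.
    bool isValidZeroPointQuant8Asymm(int32_t zeroPoint)  { return zeroPoint >= 0 && zeroPoint <= 255; }
    bool isValidZeroPointQuant16Asymm(int32_t zeroPoint) { return zeroPoint >= 0 && zeroPoint <= 65535; }
    bool isValidZeroPointSymm(int32_t zeroPoint)         { return zeroPoint == 0; }
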
+
+static void mutateOperandZeroPointTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ const std::vector<int32_t> invalidZeroPoints =
+ getInvalidZeroPoints(model.operands[operand].type);
+ for (int32_t invalidZeroPoint : invalidZeroPoints) {
+ const std::string message = "mutateOperandZeroPointTest: operand " +
+ std::to_string(operand) + " has zero point of " +
+ std::to_string(invalidZeroPoint);
+ validate(device, message, model, [operand, invalidZeroPoint](Model* model) {
+ model->operands[operand].zeroPoint = invalidZeroPoint;
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE EXTRA ??? /////////////////////////
+
+// TODO: Operand::lifetime
+// TODO: Operand::location
+
+///////////////////////// VALIDATE OPERATION OPERAND TYPE /////////////////////////
+
+static void mutateOperand(Operand* operand, OperandType type) {
+ Operand newOperand = *operand;
+ newOperand.type = type;
+ switch (type) {
+ case OperandType::FLOAT16:
+ case OperandType::FLOAT32:
+ case OperandType::INT32:
+ case OperandType::UINT32:
+ case OperandType::BOOL:
+ newOperand.dimensions = hidl_vec<uint32_t>();
+ newOperand.scale = 0.0f;
+ newOperand.zeroPoint = 0;
+ break;
+ case OperandType::TENSOR_BOOL8:
+ case OperandType::TENSOR_FLOAT16:
+ case OperandType::TENSOR_FLOAT32:
+ newOperand.dimensions =
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ newOperand.scale = 0.0f;
+ newOperand.zeroPoint = 0;
+ break;
+ case OperandType::TENSOR_INT32:
+ newOperand.dimensions =
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ newOperand.zeroPoint = 0;
+ break;
+ case OperandType::TENSOR_QUANT8_ASYMM:
+ case OperandType::TENSOR_QUANT8_SYMM:
+ case OperandType::TENSOR_QUANT16_ASYMM:
+ case OperandType::TENSOR_QUANT16_SYMM:
+ newOperand.dimensions =
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ newOperand.scale = operand->scale != 0.0f ? operand->scale : 1.0f;
+ break;
+ case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: {
+ newOperand.dimensions =
+ operand->dimensions.size() > 0 ? operand->dimensions : hidl_vec<uint32_t>({1});
+ newOperand.scale = 0.0f;
+ newOperand.zeroPoint = 0;
+
+ SymmPerChannelQuantParams channelQuant;
+ channelQuant.channelDim = 0;
+ channelQuant.scales = hidl_vec<float>(
+ operand->dimensions.size() > 0 ? static_cast<size_t>(operand->dimensions[0])
+ : 0);
+ for (size_t i = 0; i < channelQuant.scales.size(); ++i) {
+ channelQuant.scales[i] = 1.0f;
+ }
+ newOperand.extraParams.channelQuant(std::move(channelQuant));
+ } break;
+ case OperandType::OEM:
+ case OperandType::TENSOR_OEM_BYTE:
+ default:
+ break;
+ }
+ *operand = newOperand;
+}
+
+static bool mutateOperationOperandTypeSkip(size_t operand, OperandType type, const Model& model) {
+ // Do not test OEM types
+ if (type == model.operands[operand].type || type == OperandType::OEM ||
+ type == OperandType::TENSOR_OEM_BYTE) {
+ return true;
+ }
+ for (const Operation& operation : model.operations) {
+ // Skip mutateOperationOperandTypeTest for the following operations.
+ // - LSH_PROJECTION's second argument is allowed to have any type.
+ // - ARGMIN and ARGMAX's first argument can be any of
+ // TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
+ // - CAST's argument can be any of TENSOR_(FLOAT16|FLOAT32|INT32|QUANT8_ASYMM).
+ // - RANDOM_MULTINOMIAL's argument can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+ // - DEQUANTIZE input can be any of
+ // TENSOR_(QUANT8_ASYMM|QUANT8_SYMM|QUANT8_SYMM_PER_CHANNEL), output can
+ // be of either TENSOR_FLOAT16 or TENSOR_FLOAT32.
+ // - QUANTIZE input can be either TENSOR_FLOAT16 or TENSOR_FLOAT32
+ // - CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
+ // - DEPTHWISE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
+ // - GROUPED_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
+ // - TRANSPOSE_CONV_2D filter type (arg 1) can be QUANT8_ASYMM or QUANT8_SYMM_PER_CHANNEL
+ switch (operation.type) {
+ case OperationType::LSH_PROJECTION: {
+ if (operand == operation.inputs[1]) {
+ return true;
+ }
+ } break;
+ case OperationType::CAST:
+ case OperationType::ARGMAX:
+ case OperationType::ARGMIN: {
+ if (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32 ||
+ type == OperandType::TENSOR_INT32 || type == OperandType::TENSOR_QUANT8_ASYMM) {
+ return true;
+ }
+ } break;
+ case OperationType::QUANTIZE:
+ case OperationType::RANDOM_MULTINOMIAL: {
+ if (operand == operation.inputs[0] &&
+ (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
+ return true;
+ }
+ } break;
+ case OperationType::DEQUANTIZE: {
+ if (operand == operation.inputs[0] &&
+ (type == OperandType::TENSOR_QUANT8_ASYMM ||
+ type == OperandType::TENSOR_QUANT8_SYMM ||
+ type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
+ return true;
+ }
+ if (operand == operation.outputs[0] &&
+ (type == OperandType::TENSOR_FLOAT16 || type == OperandType::TENSOR_FLOAT32)) {
+ return true;
+ }
+ } break;
+ case OperationType::TRANSPOSE_CONV_2D:
+ case OperationType::GROUPED_CONV_2D:
+ case OperationType::DEPTHWISE_CONV_2D:
+ case OperationType::CONV_2D: {
+ if (operand == operation.inputs[1] &&
+ (type == OperandType::TENSOR_QUANT8_ASYMM ||
+ type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)) {
+ return true;
+ }
+ } break;
+ default:
+ break;
+ }
+ }
+ return false;
+}
+
+static void mutateOperationOperandTypeTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ for (OperandType invalidOperandType : hidl_enum_range<OperandType>{}) {
+ if (mutateOperationOperandTypeSkip(operand, invalidOperandType, model)) {
+ continue;
+ }
+ const std::string message = "mutateOperationOperandTypeTest: operand " +
+ std::to_string(operand) + " set to type " +
+ toString(invalidOperandType);
+ validate(device, message, model, [operand, invalidOperandType](Model* model) {
+ mutateOperand(&model->operands[operand], invalidOperandType);
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION TYPE /////////////////////////
+
+static const uint32_t invalidOperationTypes[] = {
+ static_cast<uint32_t>(OperationTypeRange::FUNDAMENTAL_MAX) + 1,
+ static_cast<uint32_t>(OperationTypeRange::OEM_MIN) - 1,
+ static_cast<uint32_t>(OperationTypeRange::OEM_MAX) + 1,
+};
+
+static void mutateOperationTypeTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ for (uint32_t invalidOperationType : invalidOperationTypes) {
+ const std::string message = "mutateOperationTypeTest: operation " +
+ std::to_string(operation) + " set to value " +
+ std::to_string(invalidOperationType);
+ validate(device, message, model, [operation, invalidOperationType](Model* model) {
+ model->operations[operation].type =
+ static_cast<OperationType>(invalidOperationType);
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION INPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationInputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ const uint32_t invalidOperand = model.operands.size();
+ for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+ const std::string message = "mutateOperationInputOperandIndexTest: operation " +
+ std::to_string(operation) + " input " +
+ std::to_string(input);
+ validate(device, message, model, [operation, input, invalidOperand](Model* model) {
+ model->operations[operation].inputs[input] = invalidOperand;
+ });
+ }
+ }
+}
+
+///////////////////////// VALIDATE MODEL OPERATION OUTPUT OPERAND INDEX /////////////////////////
+
+static void mutateOperationOutputOperandIndexTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ const uint32_t invalidOperand = model.operands.size();
+ for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+ const std::string message = "mutateOperationOutputOperandIndexTest: operation " +
+ std::to_string(operation) + " output " +
+ std::to_string(output);
+ validate(device, message, model, [operation, output, invalidOperand](Model* model) {
+ model->operations[operation].outputs[output] = invalidOperand;
+ });
+ }
+ }
+}
+
+///////////////////////// REMOVE OPERAND FROM EVERYTHING /////////////////////////
+
+static void removeValueAndDecrementGreaterValues(hidl_vec<uint32_t>* vec, uint32_t value) {
+ if (vec) {
+ // remove elements matching "value"
+ auto last = std::remove(vec->begin(), vec->end(), value);
+ vec->resize(std::distance(vec->begin(), last));
+
+ // decrement elements exceeding "value"
+ std::transform(vec->begin(), vec->end(), vec->begin(),
+                       [value](uint32_t v) { return v > value ? v - 1 : v; });
+ }
+}
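
Editor's note: the helper above removes an index and renumbers the survivors; a self-contained sketch of the same bookkeeping on a plain std::vector, with a worked example.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    void removeAndRenumber(std::vector<uint32_t>* vec, uint32_t value) {
        // drop every element equal to "value"
        vec->erase(std::remove(vec->begin(), vec->end(), value), vec->end());
        // indices above the removed one shift down by exactly one
        std::transform(vec->begin(), vec->end(), vec->begin(),
                       [value](uint32_t v) { return v > value ? v - 1 : v; });
    }

    // Example: {0, 2, 1, 3} with value 1 becomes {0, 1, 2}.
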
+
+static void removeOperand(Model* model, uint32_t index) {
+ hidl_vec_removeAt(&model->operands, index);
+ for (Operation& operation : model->operations) {
+ removeValueAndDecrementGreaterValues(&operation.inputs, index);
+ removeValueAndDecrementGreaterValues(&operation.outputs, index);
+ }
+ removeValueAndDecrementGreaterValues(&model->inputIndexes, index);
+ removeValueAndDecrementGreaterValues(&model->outputIndexes, index);
+}
+
+static bool removeOperandSkip(size_t operand, const Model& model) {
+ for (const Operation& operation : model.operations) {
+ // Skip removeOperandTest for the following operations.
+ // - SPLIT's outputs are not checked during prepareModel.
+ if (operation.type == OperationType::SPLIT) {
+            for (const size_t outOperand : operation.outputs) {
+                if (operand == outOperand) {
+ return true;
+ }
+ }
+ }
+ // BIDIRECTIONAL_SEQUENCE_LSTM and BIDIRECTIONAL_SEQUENCE_RNN can have either one or two
+ // outputs depending on their mergeOutputs parameter.
+ if (operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_LSTM ||
+ operation.type == OperationType::BIDIRECTIONAL_SEQUENCE_RNN) {
+            for (const size_t outOperand : operation.outputs) {
+                if (operand == outOperand) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+static void removeOperandTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operand = 0; operand < model.operands.size(); ++operand) {
+ if (removeOperandSkip(operand, model)) {
+ continue;
+ }
+ const std::string message = "removeOperandTest: operand " + std::to_string(operand);
+ validate(device, message, model,
+ [operand](Model* model) { removeOperand(model, operand); });
+ }
+}
+
+///////////////////////// REMOVE OPERATION /////////////////////////
+
+static void removeOperation(Model* model, uint32_t index) {
+ for (uint32_t operand : model->operations[index].inputs) {
+ model->operands[operand].numberOfConsumers--;
+ }
+ hidl_vec_removeAt(&model->operations, index);
+}
+
+static void removeOperationTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ const std::string message = "removeOperationTest: operation " + std::to_string(operation);
+ validate(device, message, model,
+ [operation](Model* model) { removeOperation(model, operation); });
+ }
+}
+
+///////////////////////// REMOVE OPERATION INPUT /////////////////////////
+
+static bool removeOperationInputSkip(const Operation& op, size_t input) {
+ // Skip removeOperationInputTest for the following operations.
+ // - CONCATENATION has at least 2 inputs, with the last element being INT32.
+ // - CONV_2D, DEPTHWISE_CONV_2D, MAX_POOL_2D, AVERAGE_POOL_2D, L2_POOL_2D, RESIZE_BILINEAR,
+    //   SPACE_TO_DEPTH, DEPTH_TO_SPACE, SPACE_TO_BATCH_ND, BATCH_TO_SPACE_ND can have an optional
+ // layout parameter.
+ // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional axis
+ // parameter.
+ switch (op.type) {
+ case OperationType::CONCATENATION: {
+ if (op.inputs.size() > 2 && input != op.inputs.size() - 1) {
+ return true;
+ }
+ } break;
+ case OperationType::DEPTHWISE_CONV_2D: {
+ if ((op.inputs.size() == 12 && input == 11) || (op.inputs.size() == 9 && input == 8)) {
+ return true;
+ }
+ } break;
+ case OperationType::CONV_2D:
+ case OperationType::AVERAGE_POOL_2D:
+ case OperationType::MAX_POOL_2D:
+ case OperationType::L2_POOL_2D: {
+ if ((op.inputs.size() == 11 && input == 10) || (op.inputs.size() == 8 && input == 7)) {
+ return true;
+ }
+ } break;
+ case OperationType::RESIZE_BILINEAR: {
+ if (op.inputs.size() == 4 && input == 3) {
+ return true;
+ }
+ } break;
+ case OperationType::SPACE_TO_DEPTH:
+ case OperationType::DEPTH_TO_SPACE:
+ case OperationType::BATCH_TO_SPACE_ND: {
+ if (op.inputs.size() == 3 && input == 2) {
+ return true;
+ }
+ } break;
+ case OperationType::SPACE_TO_BATCH_ND: {
+ if (op.inputs.size() == 4 && input == 3) {
+ return true;
+ }
+ } break;
+ case OperationType::L2_NORMALIZATION: {
+ if (op.inputs.size() == 2 && input == 1) {
+ return true;
+ }
+ } break;
+ case OperationType::LOCAL_RESPONSE_NORMALIZATION: {
+ if (op.inputs.size() == 6 && input == 5) {
+ return true;
+ }
+ } break;
+ case OperationType::SOFTMAX: {
+ if (op.inputs.size() == 3 && input == 2) {
+ return true;
+ }
+ } break;
+ default:
+ break;
+ }
+ return false;
+}
+
+static void removeOperationInputTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ for (size_t input = 0; input < model.operations[operation].inputs.size(); ++input) {
+ const Operation& op = model.operations[operation];
+ if (removeOperationInputSkip(op, input)) {
+ continue;
+ }
+ const std::string message = "removeOperationInputTest: operation " +
+ std::to_string(operation) + ", input " +
+ std::to_string(input);
+ validate(device, message, model, [operation, input](Model* model) {
+ uint32_t operand = model->operations[operation].inputs[input];
+ model->operands[operand].numberOfConsumers--;
+ hidl_vec_removeAt(&model->operations[operation].inputs, input);
+ });
+ }
+ }
+}
+
+///////////////////////// REMOVE OPERATION OUTPUT /////////////////////////
+
+static void removeOperationOutputTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ for (size_t output = 0; output < model.operations[operation].outputs.size(); ++output) {
+ const std::string message = "removeOperationOutputTest: operation " +
+ std::to_string(operation) + ", output " +
+ std::to_string(output);
+ validate(device, message, model, [operation, output](Model* model) {
+ hidl_vec_removeAt(&model->operations[operation].outputs, output);
+ });
+ }
+ }
+}
+
+///////////////////////// MODEL VALIDATION /////////////////////////
+
+// TODO: remove model input
+// TODO: remove model output
+// TODO: add unused operation
+
+///////////////////////// ADD OPERATION INPUT /////////////////////////
+
+static bool addOperationInputSkip(const Operation& op) {
+ // Skip addOperationInputTest for the following operations.
+ // - L2_NORMALIZATION, LOCAL_RESPONSE_NORMALIZATION, SOFTMAX can have an optional INT32 axis
+ // parameter.
+ if ((op.type == OperationType::L2_NORMALIZATION && op.inputs.size() == 1) ||
+ (op.type == OperationType::LOCAL_RESPONSE_NORMALIZATION && op.inputs.size() == 5) ||
+ (op.type == OperationType::SOFTMAX && op.inputs.size() == 2)) {
+ return true;
+ }
+ return false;
+}
+
+static void addOperationInputTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ if (addOperationInputSkip(model.operations[operation])) {
+ continue;
+ }
+ const std::string message = "addOperationInputTest: operation " + std::to_string(operation);
+ validate(device, message, model, [operation](Model* model) {
+ uint32_t index = addOperand(model, OperandLifeTime::MODEL_INPUT);
+ hidl_vec_push_back(&model->operations[operation].inputs, index);
+ hidl_vec_push_back(&model->inputIndexes, index);
+ });
+ }
+}
+
+///////////////////////// ADD OPERATION OUTPUT /////////////////////////
+
+static void addOperationOutputTest(const sp<IDevice>& device, const Model& model) {
+ for (size_t operation = 0; operation < model.operations.size(); ++operation) {
+ const std::string message =
+ "addOperationOutputTest: operation " + std::to_string(operation);
+ validate(device, message, model, [operation](Model* model) {
+ uint32_t index = addOperand(model, OperandLifeTime::MODEL_OUTPUT);
+ hidl_vec_push_back(&model->operations[operation].outputs, index);
+ hidl_vec_push_back(&model->outputIndexes, index);
+ });
+ }
+}
+
+///////////////////////// VALIDATE EXECUTION PREFERENCE /////////////////////////
+
+static const int32_t invalidExecutionPreferences[] = {
+ static_cast<int32_t>(ExecutionPreference::LOW_POWER) - 1, // lower bound
+ static_cast<int32_t>(ExecutionPreference::SUSTAINED_SPEED) + 1, // upper bound
+};
+
+static void mutateExecutionPreferenceTest(const sp<IDevice>& device, const Model& model) {
+ for (int32_t preference : invalidExecutionPreferences) {
+ const std::string message =
+ "mutateExecutionPreferenceTest: preference " + std::to_string(preference);
+ validate(
+ device, message, model, [](Model*) {},
+ static_cast<ExecutionPreference>(preference));
+ }
+}
+
+////////////////////////// ENTRY POINT //////////////////////////////
+
+void validateModel(const sp<IDevice>& device, const Model& model) {
+ mutateOperandTypeTest(device, model);
+ mutateOperandRankTest(device, model);
+ mutateOperandScaleTest(device, model);
+ mutateOperandZeroPointTest(device, model);
+ mutateOperationOperandTypeTest(device, model);
+ mutateOperationTypeTest(device, model);
+ mutateOperationInputOperandIndexTest(device, model);
+ mutateOperationOutputOperandIndexTest(device, model);
+ removeOperandTest(device, model);
+ removeOperationTest(device, model);
+ removeOperationInputTest(device, model);
+ removeOperationOutputTest(device, model);
+ addOperationInputTest(device, model);
+ addOperationOutputTest(device, model);
+ mutateExecutionPreferenceTest(device, model);
+}
+
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
new file mode 100644
index 0000000..c00512c
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/ValidateRequest.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include <chrono>
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+#include "ExecutionBurstController.h"
+#include "GeneratedTestHarness.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using V1_0::ErrorStatus;
+using V1_0::Request;
+using V1_2::IPreparedModel;
+using V1_2::MeasureTiming;
+using V1_2::OutputShape;
+using V1_2::Timing;
+using V1_2::implementation::ExecutionCallback;
+
+///////////////////////// UTILITY FUNCTIONS /////////////////////////
+
+static bool badTiming(Timing timing) {
+ return timing.timeOnDevice == UINT64_MAX && timing.timeInDriver == UINT64_MAX;
+}
+
+// Primary validation function. This function will take a valid request, apply a
+// mutation to it to invalidate the request, then pass it to interface calls
+// that use the request. Note that the request here is passed by value, and any
+// mutation to the request does not leave this function.
+static void validate(const sp<IPreparedModel>& preparedModel, const std::string& message,
+ Request request, const std::function<void(Request*)>& mutation) {
+ mutation(&request);
+
+ // We'd like to test both with timing requested and without timing
+ // requested. Rather than running each test both ways, we'll decide whether
+ // to request timing by hashing the message. We do not use std::hash because
+ // it is not guaranteed stable across executions.
+ char hash = 0;
+ for (auto c : message) {
+ hash ^= c;
+    }
+ MeasureTiming measure = (hash & 1) ? MeasureTiming::YES : MeasureTiming::NO;
+
+ // asynchronous
+ {
+ SCOPED_TRACE(message + " [execute_1_2]");
+
+ sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+ Return<ErrorStatus> executeLaunchStatus =
+ preparedModel->execute_1_2(request, measure, executionCallback);
+ ASSERT_TRUE(executeLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, static_cast<ErrorStatus>(executeLaunchStatus));
+
+ executionCallback->wait();
+ ErrorStatus executionReturnStatus = executionCallback->getStatus();
+ const auto& outputShapes = executionCallback->getOutputShapes();
+ Timing timing = executionCallback->getTiming();
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, executionReturnStatus);
+ ASSERT_EQ(outputShapes.size(), 0);
+ ASSERT_TRUE(badTiming(timing));
+ }
+
+ // synchronous
+ {
+ SCOPED_TRACE(message + " [executeSynchronously]");
+
+ Return<void> executeStatus = preparedModel->executeSynchronously(
+ request, measure,
+ [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes,
+ const Timing& timing) {
+ ASSERT_EQ(ErrorStatus::INVALID_ARGUMENT, error);
+ EXPECT_EQ(outputShapes.size(), 0);
+ EXPECT_TRUE(badTiming(timing));
+ });
+ ASSERT_TRUE(executeStatus.isOk());
+ }
+
+ // burst
+ {
+ SCOPED_TRACE(message + " [burst]");
+
+ // create burst
+ std::shared_ptr<::android::nn::ExecutionBurstController> burst =
+ android::nn::ExecutionBurstController::create(preparedModel,
+ std::chrono::microseconds{0});
+ ASSERT_NE(nullptr, burst.get());
+
+ // create memory keys
+ std::vector<intptr_t> keys(request.pools.size());
+ for (size_t i = 0; i < keys.size(); ++i) {
+ keys[i] = reinterpret_cast<intptr_t>(&request.pools[i]);
+ }
+
+ // execute and verify
+ const auto [n, outputShapes, timing, fallback] = burst->compute(request, measure, keys);
+ const ErrorStatus status = nn::convertResultCodeToErrorStatus(n);
+ EXPECT_EQ(ErrorStatus::INVALID_ARGUMENT, status);
+ EXPECT_EQ(outputShapes.size(), 0);
+ EXPECT_TRUE(badTiming(timing));
+ EXPECT_FALSE(fallback);
+
+ // additional burst testing
+ if (request.pools.size() > 0) {
+ // valid free
+ burst->freeMemory(keys.front());
+
+ // negative test: invalid free of unknown (blank) memory
+ burst->freeMemory(intptr_t{});
+
+ // negative test: double free of memory
+ burst->freeMemory(keys.front());
+ }
+ }
+}
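
Editor's note: the timing decision above XORs the message bytes and uses the parity so roughly half the cases run with timing requested, without depending on std::hash being stable across executions; the same idea as a self-contained helper.

    #include <string>

    // Returns true for roughly half of all messages, deterministically.
    bool shouldMeasureTiming(const std::string& message) {
        char hash = 0;
        for (char c : message) {
            hash ^= c;
        }
        return (hash & 1) != 0;
    }
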
+
+///////////////////////// REMOVE INPUT ////////////////////////////////////
+
+static void removeInputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+ for (size_t input = 0; input < request.inputs.size(); ++input) {
+ const std::string message = "removeInput: removed input " + std::to_string(input);
+ validate(preparedModel, message, request,
+ [input](Request* request) { hidl_vec_removeAt(&request->inputs, input); });
+ }
+}
+
+///////////////////////// REMOVE OUTPUT ////////////////////////////////////
+
+static void removeOutputTest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+ for (size_t output = 0; output < request.outputs.size(); ++output) {
+ const std::string message = "removeOutput: removed Output " + std::to_string(output);
+ validate(preparedModel, message, request,
+ [output](Request* request) { hidl_vec_removeAt(&request->outputs, output); });
+ }
+}
+
+///////////////////////////// ENTRY POINT //////////////////////////////////
+
+void validateRequest(const sp<IPreparedModel>& preparedModel, const Request& request) {
+ removeInputTest(preparedModel, request);
+ removeOutputTest(preparedModel, request);
+}
+
+void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const Request& request) {
+ SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
+ Return<void> executeStatus = preparedModel->executeSynchronously(
+ request, MeasureTiming::NO,
+ [](ErrorStatus error, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
+ ASSERT_NE(ErrorStatus::NONE, error);
+ EXPECT_EQ(outputShapes.size(), 0);
+ EXPECT_TRUE(badTiming(timing));
+ });
+ ASSERT_TRUE(executeStatus.isOk());
+}
+
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
new file mode 100644
index 0000000..4f0e150
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include "VtsHalNeuralnetworks.h"
+#include <android-base/logging.h>
+#include <hidl/ServiceManagement.h>
+#include <string>
+#include <utility>
+#include "1.0/Callbacks.h"
+#include "1.0/Utils.h"
+#include "GeneratedTestHarness.h"
+#include "TestHarness.h"
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using HidlToken =
+ hidl_array<uint8_t, static_cast<uint32_t>(V1_2::Constant::BYTE_SIZE_OF_CACHE_TOKEN)>;
+using V1_0::ErrorStatus;
+using V1_0::Request;
+using V1_1::ExecutionPreference;
+using V1_2::IPreparedModel;
+using V1_2::implementation::PreparedModelCallback;
+
+// internal helper function
+void createPreparedModel(const sp<IDevice>& device, const Model& model,
+ sp<IPreparedModel>* preparedModel) {
+ ASSERT_NE(nullptr, preparedModel);
+ *preparedModel = nullptr;
+
+ // see if service can handle model
+ bool fullySupportsModel = false;
+ const Return<void> supportedCall = device->getSupportedOperations_1_3(
+ model, [&fullySupportsModel](ErrorStatus status, const hidl_vec<bool>& supported) {
+ ASSERT_EQ(ErrorStatus::NONE, status);
+ ASSERT_NE(0ul, supported.size());
+ fullySupportsModel = std::all_of(supported.begin(), supported.end(),
+ [](bool valid) { return valid; });
+ });
+ ASSERT_TRUE(supportedCall.isOk());
+
+ // launch prepare model
+ const sp<PreparedModelCallback> preparedModelCallback = new PreparedModelCallback();
+ const Return<ErrorStatus> prepareLaunchStatus = device->prepareModel_1_3(
+ model, ExecutionPreference::FAST_SINGLE_ANSWER, hidl_vec<hidl_handle>(),
+ hidl_vec<hidl_handle>(), HidlToken(), preparedModelCallback);
+ ASSERT_TRUE(prepareLaunchStatus.isOk());
+ ASSERT_EQ(ErrorStatus::NONE, static_cast<ErrorStatus>(prepareLaunchStatus));
+
+ // retrieve prepared model
+ preparedModelCallback->wait();
+ const ErrorStatus prepareReturnStatus = preparedModelCallback->getStatus();
+ *preparedModel = getPreparedModel_1_2(preparedModelCallback);
+
+    // The getSupportedOperations_1_3 call returns a list of operations that are
+    // guaranteed not to fail if prepareModel_1_3 is called, and
+    // 'fullySupportsModel' is true if and only if the entire model is
+    // guaranteed to prepare. If a driver has any doubt that it can prepare an
+    // operation, it must return false. So here, if a driver isn't sure whether
+    // it can support an operation, but reports that it successfully prepared
+    // the model, the test can continue.
+ if (!fullySupportsModel && prepareReturnStatus != ErrorStatus::NONE) {
+ ASSERT_EQ(nullptr, preparedModel->get());
+ LOG(INFO) << "NN VTS: Early termination of test because vendor service cannot prepare "
+ "model that it does not support.";
+ std::cout << "[ ] Early termination of test because vendor service cannot "
+ "prepare model that it does not support."
+ << std::endl;
+ GTEST_SKIP();
+ }
+ ASSERT_EQ(ErrorStatus::NONE, prepareReturnStatus);
+ ASSERT_NE(nullptr, preparedModel->get());
+}
+
+void NeuralnetworksHidlTest::SetUp() {
+ testing::TestWithParam<NeuralnetworksHidlTestParam>::SetUp();
+ ASSERT_NE(kDevice, nullptr);
+}
+
+static NamedDevice makeNamedDevice(const std::string& name) {
+ return {name, IDevice::getService(name)};
+}
+
+static std::vector<NamedDevice> getNamedDevicesImpl() {
+    // Retrieves the names of all service instances that implement IDevice,
+ // including any Lazy HAL instances.
+ const std::vector<std::string> names = hardware::getAllHalInstanceNames(IDevice::descriptor);
+
+ // Get a handle to each device and pair it with its name.
+ std::vector<NamedDevice> namedDevices;
+ namedDevices.reserve(names.size());
+ std::transform(names.begin(), names.end(), std::back_inserter(namedDevices), makeNamedDevice);
+ return namedDevices;
+}
+
+const std::vector<NamedDevice>& getNamedDevices() {
+ const static std::vector<NamedDevice> devices = getNamedDevicesImpl();
+ return devices;
+}
+
+std::string printNeuralnetworksHidlTest(
+ const testing::TestParamInfo<NeuralnetworksHidlTestParam>& info) {
+ return gtestCompliantName(getName(info.param));
+}
+
+INSTANTIATE_DEVICE_TEST(NeuralnetworksHidlTest);
+
+// Forward declaration from ValidateModel.cpp
+void validateModel(const sp<IDevice>& device, const Model& model);
+// Forward declaration from ValidateRequest.cpp
+void validateRequest(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+// Forward declaration from ValidateRequest.cpp
+void validateRequestFailure(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+// Forward declaration from ValidateBurst.cpp
+void validateBurst(const sp<IPreparedModel>& preparedModel, const V1_0::Request& request);
+
+void validateEverything(const sp<IDevice>& device, const Model& model, const Request& request) {
+ validateModel(device, model);
+
+ // Create IPreparedModel.
+ sp<IPreparedModel> preparedModel;
+ createPreparedModel(device, model, &preparedModel);
+ if (preparedModel == nullptr) return;
+
+ validateRequest(preparedModel, request);
+ validateBurst(preparedModel, request);
+}
+
+void validateFailure(const sp<IDevice>& device, const Model& model, const Request& request) {
+ // TODO: Should this always succeed?
+ // What if the invalid input is part of the model (i.e., a parameter).
+ validateModel(device, model);
+
+ // Create IPreparedModel.
+ sp<IPreparedModel> preparedModel;
+ createPreparedModel(device, model, &preparedModel);
+ if (preparedModel == nullptr) return;
+
+ validateRequestFailure(preparedModel, request);
+}
+
+TEST_P(ValidationTest, Test) {
+ const Model model = createModel(kTestModel);
+ const Request request = createRequest(kTestModel);
+ if (kTestModel.expectFailure) {
+ validateFailure(kDevice, model, request);
+ } else {
+ validateEverything(kDevice, model, request);
+ }
+}
+
+INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
+
+sp<IPreparedModel> getPreparedModel_1_2(const sp<PreparedModelCallback>& callback) {
+ sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
+ return IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
+}
+
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
new file mode 100644
index 0000000..fc654ce
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H
+#define ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H
+
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.3/IDevice.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <gtest/gtest.h>
+#include "1.0/Utils.h"
+#include "1.2/Callbacks.h"
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using NamedDevice = Named<sp<IDevice>>;
+using NeuralnetworksHidlTestParam = NamedDevice;
+
+class NeuralnetworksHidlTest : public testing::TestWithParam<NeuralnetworksHidlTestParam> {
+ protected:
+ void SetUp() override;
+ const sp<IDevice> kDevice = getData(GetParam());
+};
+
+const std::vector<NamedDevice>& getNamedDevices();
+
+std::string printNeuralnetworksHidlTest(
+ const testing::TestParamInfo<NeuralnetworksHidlTestParam>& info);
+
+#define INSTANTIATE_DEVICE_TEST(TestSuite) \
+ INSTANTIATE_TEST_SUITE_P(PerInstance, TestSuite, testing::ValuesIn(getNamedDevices()), \
+ printNeuralnetworksHidlTest)
+
+// Create an IPreparedModel object. If the model cannot be prepared,
+// "preparedModel" will be nullptr instead.
+void createPreparedModel(const sp<IDevice>& device, const Model& model,
+ sp<V1_2::IPreparedModel>* preparedModel);
+
+// Utility function to get PreparedModel from callback and downcast to V1_2.
+sp<V1_2::IPreparedModel> getPreparedModel_1_2(
+ const sp<V1_2::implementation::PreparedModelCallback>& callback);
+
+} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
+
+#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H
diff --git a/neuralnetworks/TEST_MAPPING b/neuralnetworks/TEST_MAPPING
index 50b6c19..421922a 100644
--- a/neuralnetworks/TEST_MAPPING
+++ b/neuralnetworks/TEST_MAPPING
@@ -1,26 +1,35 @@
{
"presubmit": [
{
- "name": "PresubmitHalNeuralnetworksV1_0TargetTest",
+ "name": "VtsHalNeuralnetworksV1_0TargetTest",
"options": [
{
- "native-test-flag": "--hal_service_instance=android.hardware.neuralnetworks@1.0::IDevice/sample-all"
+ // Just use sample-all driver for presubmit tests for faster results.
+ // The other sample drivers (fast-float, quant, etc.) are subsets of
+ // sample-all.
+ "native-test-flag": "--gtest_filter=*sample_all*"
}
]
},
{
- "name": "PresubmitHalNeuralnetworksV1_1TargetTest",
+ "name": "VtsHalNeuralnetworksV1_1TargetTest",
"options": [
{
- "native-test-flag": "--hal_service_instance=android.hardware.neuralnetworks@1.1::IDevice/sample-all"
+ // Just use sample-all driver for presubmit tests for faster results.
+ // The other sample drivers (fast-float, quant, etc.) are subsets of
+ // sample-all.
+ "native-test-flag": "--gtest_filter=*sample_all*"
}
]
},
{
- "name": "PresubmitHalNeuralnetworksV1_2TargetTest",
+ "name": "VtsHalNeuralnetworksV1_2TargetTest",
"options": [
{
- "native-test-flag": "--hal_service_instance=android.hardware.neuralnetworks@1.2::IDevice/sample-all"
+ // Just use sample-all driver for presubmit tests for faster results.
+ // The other sample drivers (fast-float, quant, etc.) are subsets of
+ // sample-all.
+ "native-test-flag": "--gtest_filter=*sample_all*"
}
]
}
diff --git a/power/stats/1.0/default/PowerStats.cpp b/power/stats/1.0/default/PowerStats.cpp
index 78766f2..68275ce 100644
--- a/power/stats/1.0/default/PowerStats.cpp
+++ b/power/stats/1.0/default/PowerStats.cpp
@@ -87,7 +87,7 @@
std::string railFileName;
std::string spsFileName;
uint32_t index = 0;
- uint32_t samplingRate;
+ unsigned long samplingRate;
for (const auto& path : mPm.devicePaths) {
railFileName = path + "/enabled_rails";
spsFileName = path + "/sampling_rate";
@@ -109,10 +109,11 @@
while (std::getline(railNames, line)) {
std::vector<std::string> words = android::base::Split(line, ":");
if (words.size() == 2) {
- mPm.railsInfo.emplace(words[0], RailData{.devicePath = path,
- .index = index,
- .subsysName = words[1],
- .samplingRate = samplingRate});
+ mPm.railsInfo.emplace(
+ words[0], RailData{.devicePath = path,
+ .index = index,
+ .subsysName = words[1],
+ .samplingRate = static_cast<uint32_t>(samplingRate)});
index++;
} else {
ALOGW("Unexpected format in file: %s", railFileName.c_str());
diff --git a/radio/1.2/types.hal b/radio/1.2/types.hal
index dffebd3..f10d753 100644
--- a/radio/1.2/types.hal
+++ b/radio/1.2/types.hal
@@ -161,7 +161,8 @@
ScanType type;
/**
- * Time interval in seconds between periodic scans, only valid when type = PERIODIC
+ * Time interval in seconds between the completion of one scan and the start of a subsequent scan.
+ * This field is only valid when 'type' is 'PERIODIC'.
* Range: ScanIntervalRange:MIN to ScanIntervalRange:MAX
*/
int32_t interval;
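
Editor's note: for reference, a request consistent with the clarified 'interval' semantics could look like the following illustrative fragment; the field values are examples only, and 'specifier' is assumed to be declared as in the VTS tests later in this change.

    ::android::hardware::radio::V1_2::NetworkScanRequest request = {
            .type = ScanType::PERIODIC,
            .interval = 60,  // seconds from the end of one scan to the start of the next
            .specifiers = {specifier},
            .maxSearchTime = 60,
            .incrementalResults = false,
            .incrementalResultsPeriodicity = 1};
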
diff --git a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
index 5184ef9..a98f22a 100644
--- a/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.2/vts/functional/radio_hidl_hal_api.cpp
@@ -46,7 +46,10 @@
::android::hardware::radio::V1_2::NetworkScanRequest request = {
.type = ScanType::ONE_SHOT,
.interval = 60,
- .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850}};
+ .specifiers = {::GERAN_SPECIFIER_P900, ::GERAN_SPECIFIER_850},
+ .maxSearchTime = 60,
+ .incrementalResults = false,
+ .incrementalResultsPeriodicity = 1};
Return<void> res = radio_v1_2->startNetworkScan_1_2(serial, request);
ASSERT_OK(res);
diff --git a/radio/1.4/vts/functional/radio_hidl_hal_api.cpp b/radio/1.4/vts/functional/radio_hidl_hal_api.cpp
index f81af9b..a4953d7 100644
--- a/radio/1.4/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.4/vts/functional/radio_hidl_hal_api.cpp
@@ -183,7 +183,12 @@
.channels = {1, 2}};
::android::hardware::radio::V1_2::NetworkScanRequest request = {
- .type = ScanType::ONE_SHOT, .interval = 60, .specifiers = {specifier}};
+ .type = ScanType::ONE_SHOT,
+ .interval = 60,
+ .specifiers = {specifier},
+ .maxSearchTime = 60,
+ .incrementalResults = false,
+ .incrementalResultsPeriodicity = 1};
Return<void> res = radio_v1_4->startNetworkScan_1_4(serial, request);
ASSERT_OK(res);
@@ -398,7 +403,7 @@
.interval = 60,
.specifiers = {specifier},
.maxSearchTime = 600,
- .incrementalResults = false,
+ .incrementalResults = true,
.incrementalResultsPeriodicity = 0};
Return<void> res = radio_v1_4->startNetworkScan_1_4(serial, request);
@@ -434,7 +439,7 @@
.interval = 60,
.specifiers = {specifier},
.maxSearchTime = 600,
- .incrementalResults = false,
+ .incrementalResults = true,
.incrementalResultsPeriodicity = 11};
Return<void> res = radio_v1_4->startNetworkScan_1_4(serial, request);
diff --git a/renderscript/1.0/default/Device.cpp b/renderscript/1.0/default/Device.cpp
index d603a12..9a6d7ba 100644
--- a/renderscript/1.0/default/Device.cpp
+++ b/renderscript/1.0/default/Device.cpp
@@ -86,150 +86,116 @@
}
dispatchTable dispatchHal = {
- .SetNativeLibDir = (SetNativeLibDirFnPtr) nullptr,
+ .SetNativeLibDir = (SetNativeLibDirFnPtr) nullptr,
- .Allocation1DData =
- (Allocation1DDataFnPtr)dlsym(handle, "rsAllocation1DData"),
- .Allocation1DElementData = (Allocation1DElementDataFnPtr) nullptr,
- .Allocation1DRead =
- (Allocation1DReadFnPtr)dlsym(handle, "rsAllocation1DRead"),
- .Allocation2DData =
- (Allocation2DDataFnPtr)dlsym(handle, "rsAllocation2DData"),
- .Allocation2DRead =
- (Allocation2DReadFnPtr)dlsym(handle, "rsAllocation2DRead"),
- .Allocation3DData =
- (Allocation3DDataFnPtr)dlsym(handle, "rsAllocation3DData"),
- .Allocation3DRead =
- (Allocation3DReadFnPtr)dlsym(handle, "rsAllocation3DRead"),
- .AllocationAdapterCreate = (AllocationAdapterCreateFnPtr)dlsym(
- handle, "rsAllocationAdapterCreate"),
- .AllocationAdapterOffset = (AllocationAdapterOffsetFnPtr)dlsym(
- handle, "rsAllocationAdapterOffset"),
- .AllocationCopy2DRange = (AllocationCopy2DRangeFnPtr)dlsym(
- handle, "rsAllocationCopy2DRange"),
- .AllocationCopy3DRange = (AllocationCopy3DRangeFnPtr)dlsym(
- handle, "rsAllocationCopy3DRange"),
- .AllocationCopyToBitmap = (AllocationCopyToBitmapFnPtr)dlsym(
- handle, "rsAllocationCopyToBitmap"),
- .AllocationCreateFromBitmap = (AllocationCreateFromBitmapFnPtr)dlsym(
- handle, "rsAllocationCreateFromBitmap"),
- .AllocationCreateStrided = (AllocationCreateStridedFnPtr)dlsym(
- handle, "rsAllocationCreateStrided"),
- .AllocationCreateTyped = (AllocationCreateTypedFnPtr)dlsym(
- handle, "rsAllocationCreateTyped"),
- .AllocationCubeCreateFromBitmap =
- (AllocationCubeCreateFromBitmapFnPtr)dlsym(
- handle, "rsAllocationCubeCreateFromBitmap"),
- .AllocationElementData = (AllocationElementDataFnPtr)dlsym(
- handle, "rsAllocationElementData"),
- .AllocationElementRead = (AllocationElementReadFnPtr)dlsym(
- handle, "rsAllocationElementRead"),
- .AllocationGenerateMipmaps = (AllocationGenerateMipmapsFnPtr)dlsym(
- handle, "rsAllocationGenerateMipmaps"),
- .AllocationGetPointer =
- (AllocationGetPointerFnPtr)dlsym(handle, "rsAllocationGetPointer"),
- .AllocationGetSurface =
- (AllocationGetSurfaceFnPtr)dlsym(handle, "rsAllocationGetSurface"),
- .AllocationGetType =
- (AllocationGetTypeFnPtr)dlsym(handle, "rsaAllocationGetType"),
- .AllocationIoReceive =
- (AllocationIoReceiveFnPtr)dlsym(handle, "rsAllocationIoReceive"),
- .AllocationIoSend =
- (AllocationIoSendFnPtr)dlsym(handle, "rsAllocationIoSend"),
- .AllocationRead =
- (AllocationReadFnPtr)dlsym(handle, "rsAllocationRead"),
- .AllocationResize1D =
- (AllocationResize1DFnPtr)dlsym(handle, "rsAllocationResize1D"),
- .AllocationSetSurface =
- (AllocationSetSurfaceFnPtr)dlsym(handle, "rsAllocationSetSurface"),
- .AllocationSetupBufferQueue = (AllocationSetupBufferQueueFnPtr)dlsym(
- handle, "rsAllocationSetupBufferQueue"),
- .AllocationShareBufferQueue = (AllocationShareBufferQueueFnPtr)dlsym(
- handle, "rsAllocationShareBufferQueue"),
- .AllocationSyncAll =
- (AllocationSyncAllFnPtr)dlsym(handle, "rsAllocationSyncAll"),
- .AssignName = (AssignNameFnPtr)dlsym(handle, "rsAssignName"),
- .ClosureCreate = (ClosureCreateFnPtr)dlsym(handle, "rsClosureCreate"),
- .ClosureSetArg = (ClosureSetArgFnPtr)dlsym(handle, "rsClosureSetArg"),
- .ClosureSetGlobal =
- (ClosureSetGlobalFnPtr)dlsym(handle, "rsClosureSetGlobal"),
- .ContextCreateVendor =
- (ContextCreateVendorFnPtr)dlsym(handle, "rsContextCreateVendor"),
- .ContextDeinitToClient = (ContextDeinitToClientFnPtr)dlsym(
- handle, "rsContextDeinitToClient"),
- .ContextDestroy =
- (ContextDestroyFnPtr)dlsym(handle, "rsContextDestroy"),
- .ContextDump = (ContextDumpFnPtr)dlsym(handle, "rsContextDump"),
- .ContextFinish = (ContextFinishFnPtr)dlsym(handle, "rsContextFinish"),
- .ContextGetMessage =
- (ContextGetMessageFnPtr)dlsym(handle, "rsContextGetMessage"),
- .ContextInitToClient =
- (ContextInitToClientFnPtr)dlsym(handle, "rsContextInitToClient"),
- .ContextPeekMessage =
- (ContextPeekMessageFnPtr)dlsym(handle, "rsContextPeekMessage"),
- .ContextSendMessage =
- (ContextSendMessageFnPtr)dlsym(handle, "rsContextSendMessage"),
- .ContextSetCacheDir =
- (ContextSetCacheDirFnPtr)dlsym(handle, "rsContextSetCacheDir"),
- .ContextSetPriority =
- (ContextSetPriorityFnPtr)dlsym(handle, "rsContextSetPriority"),
- .DeviceCreate = (DeviceCreateFnPtr) nullptr,
- .DeviceDestroy = (DeviceDestroyFnPtr) nullptr,
- .DeviceSetConfig = (DeviceSetConfigFnPtr) nullptr,
- .ElementCreate2 =
- (ElementCreate2FnPtr)dlsym(handle, "rsElementCreate2"),
- .ElementCreate = (ElementCreateFnPtr)dlsym(handle, "rsElementCreate"),
- .ElementGetNativeData =
- (ElementGetNativeDataFnPtr)dlsym(handle, "rsaElementGetNativeData"),
- .ElementGetSubElements = (ElementGetSubElementsFnPtr)dlsym(
- handle, "rsaElementGetSubElements"),
- .GetName = (GetNameFnPtr)dlsym(handle, "rsaGetName"),
- .InvokeClosureCreate =
- (InvokeClosureCreateFnPtr)dlsym(handle, "rsInvokeClosureCreate"),
- .ObjDestroy = (ObjDestroyFnPtr)dlsym(handle, "rsObjDestroy"),
- .SamplerCreate = (SamplerCreateFnPtr)dlsym(handle, "rsSamplerCreate"),
- .ScriptBindAllocation =
- (ScriptBindAllocationFnPtr)dlsym(handle, "rsScriptBindAllocation"),
- .ScriptCCreate = (ScriptCCreateFnPtr)dlsym(handle, "rsScriptCCreate"),
- .ScriptFieldIDCreate =
- (ScriptFieldIDCreateFnPtr)dlsym(handle, "rsScriptFieldIDCreate"),
- .ScriptForEach = (ScriptForEachFnPtr) nullptr,
- .ScriptForEachMulti =
- (ScriptForEachMultiFnPtr)dlsym(handle, "rsScriptForEachMulti"),
- .ScriptGetVarV = (ScriptGetVarVFnPtr)dlsym(handle, "rsScriptGetVarV"),
- .ScriptGroup2Create =
- (ScriptGroup2CreateFnPtr)dlsym(handle, "rsScriptGroup2Create"),
- .ScriptGroupCreate =
- (ScriptGroupCreateFnPtr)dlsym(handle, "rsScriptGroupCreate"),
- .ScriptGroupExecute =
- (ScriptGroupExecuteFnPtr)dlsym(handle, "rsScriptGroupExecute"),
- .ScriptGroupSetInput =
- (ScriptGroupSetInputFnPtr)dlsym(handle, "rsScriptGroupSetInput"),
- .ScriptGroupSetOutput =
- (ScriptGroupSetOutputFnPtr)dlsym(handle, "rsScriptGroupSetOutput"),
- .ScriptIntrinsicCreate = (ScriptIntrinsicCreateFnPtr)dlsym(
- handle, "rsScriptIntrinsicCreate"),
- .ScriptInvoke = (ScriptInvokeFnPtr)dlsym(handle, "rsScriptInvoke"),
- .ScriptInvokeIDCreate =
- (ScriptInvokeIDCreateFnPtr)dlsym(handle, "rsScriptInvokeIDCreate"),
- .ScriptInvokeV = (ScriptInvokeVFnPtr)dlsym(handle, "rsScriptInvokeV"),
- .ScriptKernelIDCreate =
- (ScriptKernelIDCreateFnPtr)dlsym(handle, "rsScriptKernelIDCreate"),
- .ScriptReduce = (ScriptReduceFnPtr)dlsym(handle, "rsScriptReduce"),
- .ScriptSetTimeZone =
- (ScriptSetTimeZoneFnPtr)dlsym(handle, "rsScriptSetTimeZone"),
- .ScriptSetVarD = (ScriptSetVarDFnPtr)dlsym(handle, "rsScriptSetVarD"),
- .ScriptSetVarF = (ScriptSetVarFFnPtr)dlsym(handle, "rsScriptSetVarF"),
- .ScriptSetVarI = (ScriptSetVarIFnPtr)dlsym(handle, "rsScriptSetVarI"),
- .ScriptSetVarJ = (ScriptSetVarJFnPtr)dlsym(handle, "rsScriptSetVarJ"),
- .ScriptSetVarObj =
- (ScriptSetVarObjFnPtr)dlsym(handle, "rsScriptSetVarObj"),
- .ScriptSetVarVE =
- (ScriptSetVarVEFnPtr)dlsym(handle, "rsScriptSetVarVE"),
- .ScriptSetVarV = (ScriptSetVarVFnPtr)dlsym(handle, "rsScriptSetVarV"),
- .TypeCreate = (TypeCreateFnPtr)dlsym(handle, "rsTypeCreate"),
- .TypeGetNativeData =
- (TypeGetNativeDataFnPtr)dlsym(handle, "rsaTypeGetNativeData"),
+ .Allocation1DData = (Allocation1DDataFnPtr)dlsym(handle, "rsAllocation1DData"),
+ .Allocation1DElementData = (Allocation1DElementDataFnPtr) nullptr,
+ .Allocation1DRead = (Allocation1DReadFnPtr)dlsym(handle, "rsAllocation1DRead"),
+ .Allocation2DData = (Allocation2DDataFnPtr)dlsym(handle, "rsAllocation2DData"),
+ .Allocation2DRead = (Allocation2DReadFnPtr)dlsym(handle, "rsAllocation2DRead"),
+ .Allocation3DData = (Allocation3DDataFnPtr)dlsym(handle, "rsAllocation3DData"),
+ .Allocation3DRead = (Allocation3DReadFnPtr)dlsym(handle, "rsAllocation3DRead"),
+ .AllocationAdapterCreate =
+ (AllocationAdapterCreateFnPtr)dlsym(handle, "rsAllocationAdapterCreate"),
+ .AllocationAdapterOffset =
+ (AllocationAdapterOffsetFnPtr)dlsym(handle, "rsAllocationAdapterOffset"),
+ .AllocationCopy2DRange =
+ (AllocationCopy2DRangeFnPtr)dlsym(handle, "rsAllocationCopy2DRange"),
+ .AllocationCopy3DRange =
+ (AllocationCopy3DRangeFnPtr)dlsym(handle, "rsAllocationCopy3DRange"),
+ .AllocationCopyToBitmap =
+ (AllocationCopyToBitmapFnPtr)dlsym(handle, "rsAllocationCopyToBitmap"),
+ .AllocationCreateFromBitmap =
+ (AllocationCreateFromBitmapFnPtr)dlsym(handle, "rsAllocationCreateFromBitmap"),
+ .AllocationCreateStrided =
+ (AllocationCreateStridedFnPtr)dlsym(handle, "rsAllocationCreateStrided"),
+ .AllocationCreateTyped =
+ (AllocationCreateTypedFnPtr)dlsym(handle, "rsAllocationCreateTyped"),
+ .AllocationCubeCreateFromBitmap = (AllocationCubeCreateFromBitmapFnPtr)dlsym(
+ handle, "rsAllocationCubeCreateFromBitmap"),
+ .AllocationElementData =
+ (AllocationElementDataFnPtr)dlsym(handle, "rsAllocationElementData"),
+ .AllocationElementRead =
+ (AllocationElementReadFnPtr)dlsym(handle, "rsAllocationElementRead"),
+ .AllocationGenerateMipmaps =
+ (AllocationGenerateMipmapsFnPtr)dlsym(handle, "rsAllocationGenerateMipmaps"),
+ .AllocationGetPointer =
+ (AllocationGetPointerFnPtr)dlsym(handle, "rsAllocationGetPointer"),
+ .AllocationGetSurface =
+ (AllocationGetSurfaceFnPtr)dlsym(handle, "rsAllocationGetSurface"),
+ .AllocationGetType = (AllocationGetTypeFnPtr)dlsym(handle, "rsaAllocationGetType"),
+ .AllocationIoReceive = (AllocationIoReceiveFnPtr)dlsym(handle, "rsAllocationIoReceive"),
+ .AllocationIoSend = (AllocationIoSendFnPtr)dlsym(handle, "rsAllocationIoSend"),
+ .AllocationRead = (AllocationReadFnPtr)dlsym(handle, "rsAllocationRead"),
+ .AllocationResize1D = (AllocationResize1DFnPtr)dlsym(handle, "rsAllocationResize1D"),
+ .AllocationSetSurface =
+ (AllocationSetSurfaceFnPtr)dlsym(handle, "rsAllocationSetSurface"),
+ .AllocationSyncAll = (AllocationSyncAllFnPtr)dlsym(handle, "rsAllocationSyncAll"),
+ .AllocationSetupBufferQueue =
+ (AllocationSetupBufferQueueFnPtr)dlsym(handle, "rsAllocationSetupBufferQueue"),
+ .AllocationShareBufferQueue =
+ (AllocationShareBufferQueueFnPtr)dlsym(handle, "rsAllocationShareBufferQueue"),
+ .AssignName = (AssignNameFnPtr)dlsym(handle, "rsAssignName"),
+ .ClosureCreate = (ClosureCreateFnPtr)dlsym(handle, "rsClosureCreate"),
+ .ClosureSetArg = (ClosureSetArgFnPtr)dlsym(handle, "rsClosureSetArg"),
+ .ClosureSetGlobal = (ClosureSetGlobalFnPtr)dlsym(handle, "rsClosureSetGlobal"),
+ .ContextCreateVendor = (ContextCreateVendorFnPtr)dlsym(handle, "rsContextCreateVendor"),
+ .ContextDeinitToClient =
+ (ContextDeinitToClientFnPtr)dlsym(handle, "rsContextDeinitToClient"),
+ .ContextDestroy = (ContextDestroyFnPtr)dlsym(handle, "rsContextDestroy"),
+ .ContextDump = (ContextDumpFnPtr)dlsym(handle, "rsContextDump"),
+ .ContextFinish = (ContextFinishFnPtr)dlsym(handle, "rsContextFinish"),
+ .ContextGetMessage = (ContextGetMessageFnPtr)dlsym(handle, "rsContextGetMessage"),
+ .ContextInitToClient = (ContextInitToClientFnPtr)dlsym(handle, "rsContextInitToClient"),
+ .ContextPeekMessage = (ContextPeekMessageFnPtr)dlsym(handle, "rsContextPeekMessage"),
+ .ContextSendMessage = (ContextSendMessageFnPtr)dlsym(handle, "rsContextSendMessage"),
+ .ContextSetPriority = (ContextSetPriorityFnPtr)dlsym(handle, "rsContextSetPriority"),
+ .ContextSetCacheDir = (ContextSetCacheDirFnPtr)dlsym(handle, "rsContextSetCacheDir"),
+ .DeviceCreate = (DeviceCreateFnPtr) nullptr,
+ .DeviceDestroy = (DeviceDestroyFnPtr) nullptr,
+ .DeviceSetConfig = (DeviceSetConfigFnPtr) nullptr,
+ .ElementCreate2 = (ElementCreate2FnPtr)dlsym(handle, "rsElementCreate2"),
+ .ElementCreate = (ElementCreateFnPtr)dlsym(handle, "rsElementCreate"),
+ .ElementGetNativeData =
+ (ElementGetNativeDataFnPtr)dlsym(handle, "rsaElementGetNativeData"),
+ .ElementGetSubElements =
+ (ElementGetSubElementsFnPtr)dlsym(handle, "rsaElementGetSubElements"),
+ .GetName = (GetNameFnPtr)dlsym(handle, "rsaGetName"),
+ .InvokeClosureCreate = (InvokeClosureCreateFnPtr)dlsym(handle, "rsInvokeClosureCreate"),
+ .ObjDestroy = (ObjDestroyFnPtr)dlsym(handle, "rsObjDestroy"),
+ .SamplerCreate = (SamplerCreateFnPtr)dlsym(handle, "rsSamplerCreate"),
+ .ScriptBindAllocation =
+ (ScriptBindAllocationFnPtr)dlsym(handle, "rsScriptBindAllocation"),
+ .ScriptCCreate = (ScriptCCreateFnPtr)dlsym(handle, "rsScriptCCreate"),
+ .ScriptFieldIDCreate = (ScriptFieldIDCreateFnPtr)dlsym(handle, "rsScriptFieldIDCreate"),
+ .ScriptForEach = (ScriptForEachFnPtr) nullptr,
+ .ScriptForEachMulti = (ScriptForEachMultiFnPtr)dlsym(handle, "rsScriptForEachMulti"),
+ .ScriptGetVarV = (ScriptGetVarVFnPtr)dlsym(handle, "rsScriptGetVarV"),
+ .ScriptGroup2Create = (ScriptGroup2CreateFnPtr)dlsym(handle, "rsScriptGroup2Create"),
+ .ScriptGroupCreate = (ScriptGroupCreateFnPtr)dlsym(handle, "rsScriptGroupCreate"),
+ .ScriptGroupExecute = (ScriptGroupExecuteFnPtr)dlsym(handle, "rsScriptGroupExecute"),
+ .ScriptGroupSetInput = (ScriptGroupSetInputFnPtr)dlsym(handle, "rsScriptGroupSetInput"),
+ .ScriptGroupSetOutput =
+ (ScriptGroupSetOutputFnPtr)dlsym(handle, "rsScriptGroupSetOutput"),
+ .ScriptIntrinsicCreate =
+ (ScriptIntrinsicCreateFnPtr)dlsym(handle, "rsScriptIntrinsicCreate"),
+ .ScriptInvoke = (ScriptInvokeFnPtr)dlsym(handle, "rsScriptInvoke"),
+ .ScriptInvokeIDCreate =
+ (ScriptInvokeIDCreateFnPtr)dlsym(handle, "rsScriptInvokeIDCreate"),
+ .ScriptInvokeV = (ScriptInvokeVFnPtr)dlsym(handle, "rsScriptInvokeV"),
+ .ScriptKernelIDCreate =
+ (ScriptKernelIDCreateFnPtr)dlsym(handle, "rsScriptKernelIDCreate"),
+ .ScriptReduce = (ScriptReduceFnPtr)dlsym(handle, "rsScriptReduce"),
+ .ScriptSetTimeZone = (ScriptSetTimeZoneFnPtr)dlsym(handle, "rsScriptSetTimeZone"),
+ .ScriptSetVarD = (ScriptSetVarDFnPtr)dlsym(handle, "rsScriptSetVarD"),
+ .ScriptSetVarF = (ScriptSetVarFFnPtr)dlsym(handle, "rsScriptSetVarF"),
+ .ScriptSetVarI = (ScriptSetVarIFnPtr)dlsym(handle, "rsScriptSetVarI"),
+ .ScriptSetVarJ = (ScriptSetVarJFnPtr)dlsym(handle, "rsScriptSetVarJ"),
+ .ScriptSetVarObj = (ScriptSetVarObjFnPtr)dlsym(handle, "rsScriptSetVarObj"),
+ .ScriptSetVarVE = (ScriptSetVarVEFnPtr)dlsym(handle, "rsScriptSetVarVE"),
+ .ScriptSetVarV = (ScriptSetVarVFnPtr)dlsym(handle, "rsScriptSetVarV"),
+ .TypeCreate = (TypeCreateFnPtr)dlsym(handle, "rsTypeCreate"),
+ .TypeGetNativeData = (TypeGetNativeDataFnPtr)dlsym(handle, "rsaTypeGetNativeData"),
};
return dispatchHal;
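
The block above builds the RenderScript dispatch table by resolving each entry point with dlsym, deliberately leaving unsupported entries (DeviceCreate, ScriptForEach, and friends) as null. A minimal standalone sketch of the same pattern follows; "libexample.so" and the Example* names are hypothetical placeholders, not part of this change.

#include <dlfcn.h>
#include <cstdio>

typedef int (*ExampleInitFnPtr)();
typedef void (*ExampleTeardownFnPtr)();

struct ExampleDispatchTable {
    ExampleInitFnPtr Init;
    ExampleTeardownFnPtr Teardown;  // Stays null when intentionally unsupported.
};

int main() {
    // Hypothetical library name; dlopen failure is handled rather than assumed away.
    void* handle = dlopen("libexample.so", RTLD_LAZY | RTLD_LOCAL);
    if (handle == nullptr) {
        fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }
    ExampleDispatchTable dispatch = {
        .Init = (ExampleInitFnPtr)dlsym(handle, "exampleInit"),
        .Teardown = (ExampleTeardownFnPtr) nullptr,  // Not wired up, like DeviceCreate above.
    };
    if (dispatch.Init != nullptr) {
        dispatch.Init();
    }
    dlclose(handle);
    return 0;
}
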
diff --git a/sensors/1.0/default/android.hardware.sensors@1.0-service.rc b/sensors/1.0/default/android.hardware.sensors@1.0-service.rc
index 4faa562..b41730b 100644
--- a/sensors/1.0/default/android.hardware.sensors@1.0-service.rc
+++ b/sensors/1.0/default/android.hardware.sensors@1.0-service.rc
@@ -2,6 +2,6 @@
interface android.hardware.sensors@1.0::ISensors default
class hal
user system
- group system wakelock
+ group system wakelock uhid
capabilities BLOCK_SUSPEND
rlimit rtprio 10 10
diff --git a/sensors/2.0/multihal/Android.bp b/sensors/2.0/multihal/Android.bp
index 697bf9e..5c67676 100644
--- a/sensors/2.0/multihal/Android.bp
+++ b/sensors/2.0/multihal/Android.bp
@@ -24,11 +24,11 @@
"libcutils",
"libfmq",
"libhidlbase",
- "libhidltransport",
"liblog",
"libpower",
"libutils",
],
+ cflags: ["-DLOG_TAG=\"SensorsMultiHal\""],
}
cc_binary {
@@ -42,10 +42,10 @@
srcs: [
"service.cpp",
"HalProxy.cpp",
+ "ScopedWakelock.cpp",
],
init_rc: ["android.hardware.sensors@2.0-service-multihal.rc"],
vintf_fragments: ["android.hardware.sensors@2.0-multihal.xml"],
- cflags: ["-DLOG_TAG=\"SensorsMultiHal\""],
}
cc_library_headers {
@@ -61,8 +61,12 @@
vendor_available: true,
srcs: [
"HalProxy.cpp",
+ "ScopedWakelock.cpp",
],
export_header_lib_headers: [
"android.hardware.sensors@2.0-multihal.header",
],
+ shared_libs: [
+ "libutils",
+ ],
}
diff --git a/sensors/2.0/multihal/HalProxy.cpp b/sensors/2.0/multihal/HalProxy.cpp
index b4d2466..d667218 100644
--- a/sensors/2.0/multihal/HalProxy.cpp
+++ b/sensors/2.0/multihal/HalProxy.cpp
@@ -16,12 +16,18 @@
#include "HalProxy.h"
+#include "SubHal.h"
+
#include <android/hardware/sensors/2.0/types.h>
+#include "hardware_legacy/power.h"
+
#include <dlfcn.h>
+#include <cinttypes>
#include <fstream>
#include <functional>
+#include <thread>
namespace android {
namespace hardware {
@@ -29,62 +35,53 @@
namespace V2_0 {
namespace implementation {
+using ::android::hardware::sensors::V2_0::EventQueueFlagBits;
+using ::android::hardware::sensors::V2_0::WakeLockQueueFlagBits;
+using ::android::hardware::sensors::V2_0::implementation::getTimeNow;
+using ::android::hardware::sensors::V2_0::implementation::kWakelockTimeoutNs;
+
typedef ISensorsSubHal*(SensorsHalGetSubHalFunc)(uint32_t*);
-// TODO: Use this wake lock name as the prefix to all sensors HAL wake locks acquired.
-// constexpr const char* kWakeLockName = "SensorsHAL_WAKEUP";
-
-// TODO: Use the following class as a starting point for implementing the full HalProxyCallback
-// along with being inspiration for how to implement the ScopedWakelock class.
/**
- * Callback class used to provide the HalProxy with the index of which subHal is invoking
+ * Set the subhal index as the first byte of the sensor handle and return the modified version.
+ *
+ * @param sensorHandle The sensor handle to modify.
+ * @param subHalIndex The index in the hal proxy of the sub hal this sensor belongs to.
+ *
+ * @return The modified sensor handle.
*/
-class SensorsCallbackProxy : public ISensorsCallback {
- public:
- SensorsCallbackProxy(wp<HalProxy>& halProxy, int32_t subHalIndex)
- : mHalProxy(halProxy), mSubHalIndex(subHalIndex) {}
-
- Return<void> onDynamicSensorsConnected(
- const hidl_vec<SensorInfo>& dynamicSensorsAdded) override {
- sp<HalProxy> halProxy(mHalProxy.promote());
- if (halProxy != nullptr) {
- return halProxy->onDynamicSensorsConnected(dynamicSensorsAdded, mSubHalIndex);
- }
- return Return<void>();
- }
-
- Return<void> onDynamicSensorsDisconnected(
- const hidl_vec<int32_t>& dynamicSensorHandlesRemoved) override {
- sp<HalProxy> halProxy(mHalProxy.promote());
- if (halProxy != nullptr) {
- return halProxy->onDynamicSensorsDisconnected(dynamicSensorHandlesRemoved,
- mSubHalIndex);
- }
- return Return<void>();
- }
-
- private:
- wp<HalProxy>& mHalProxy;
- int32_t mSubHalIndex;
-};
+uint32_t setSubHalIndex(uint32_t sensorHandle, size_t subHalIndex) {
+ return sensorHandle | (subHalIndex << 24);
+}
HalProxy::HalProxy() {
const char* kMultiHalConfigFile = "/vendor/etc/sensors/hals.conf";
initializeSubHalListFromConfigFile(kMultiHalConfigFile);
- initializeSensorList();
+ init();
}
HalProxy::HalProxy(std::vector<ISensorsSubHal*>& subHalList) : mSubHalList(subHalList) {
- initializeSensorList();
+ init();
}
HalProxy::~HalProxy() {
- // TODO: Join any running threads and clean up FMQs and any other allocated
- // state.
+ mThreadsRun.store(false);
+ mWakelockCV.notify_one();
+ mEventQueueWriteCV.notify_one();
+ if (mPendingWritesThread.joinable()) {
+ mPendingWritesThread.join();
+ }
+ if (mWakelockThread.joinable()) {
+ mWakelockThread.join();
+ }
}
Return<void> HalProxy::getSensorsList(getSensorsList_cb _hidl_cb) {
- _hidl_cb(mSensorList);
+ std::vector<SensorInfo> sensors;
+ for (const auto& iter : mSensors) {
+ sensors.push_back(iter.second);
+ }
+ _hidl_cb(sensors);
return Void();
}
@@ -146,7 +143,19 @@
result = Result::BAD_VALUE;
}
- // TODO: start threads to read wake locks and process events from sub HALs.
+ mPendingWritesThread = std::thread(startPendingWritesThread, this);
+ mWakelockThread = std::thread(startWakelockThread, this);
+
+ for (size_t i = 0; i < mSubHalList.size(); i++) {
+ auto subHal = mSubHalList[i];
+ const auto& subHalCallback = mSubHalCallbacks[i];
+ Result currRes = subHal->initialize(subHalCallback);
+ if (currRes != Result::OK) {
+ result = currRes;
+ ALOGE("Subhal '%s' failed to initialize.", subHal->getName().c_str());
+ break;
+ }
+ }
return result;
}
@@ -161,30 +170,50 @@
return getSubHalForSensorHandle(sensorHandle)->flush(clearSubHalIndex(sensorHandle));
}
-Return<Result> HalProxy::injectSensorData(const Event& /* event */) {
- // TODO: Proxy API call to appropriate sub-HAL.
- return Result::INVALID_OPERATION;
+Return<Result> HalProxy::injectSensorData(const Event& event) {
+ Result result = Result::OK;
+ if (mCurrentOperationMode == OperationMode::NORMAL &&
+ event.sensorType != V1_0::SensorType::ADDITIONAL_INFO) {
+ ALOGE("An event with type != ADDITIONAL_INFO passed to injectSensorData while operation"
+ " mode was NORMAL.");
+ result = Result::BAD_VALUE;
+ }
+ if (result == Result::OK) {
+ Event subHalEvent = event;
+ subHalEvent.sensorHandle = clearSubHalIndex(event.sensorHandle);
+ result = getSubHalForSensorHandle(event.sensorHandle)->injectSensorData(subHalEvent);
+ }
+ return result;
}
-Return<void> HalProxy::registerDirectChannel(const SharedMemInfo& /* mem */,
+Return<void> HalProxy::registerDirectChannel(const SharedMemInfo& mem,
registerDirectChannel_cb _hidl_cb) {
- // TODO: During init, discover the first sub-HAL in the config that has sensors with direct
- // channel support, if any, and proxy the API call there.
- _hidl_cb(Result::INVALID_OPERATION, -1 /* channelHandle */);
+ if (mDirectChannelSubHal == nullptr) {
+ _hidl_cb(Result::INVALID_OPERATION, -1 /* channelHandle */);
+ } else {
+ mDirectChannelSubHal->registerDirectChannel(mem, _hidl_cb);
+ }
return Return<void>();
}
-Return<Result> HalProxy::unregisterDirectChannel(int32_t /* channelHandle */) {
- // TODO: During init, discover the first sub-HAL in the config that has sensors with direct
- // channel support, if any, and proxy the API call there.
- return Result::INVALID_OPERATION;
+Return<Result> HalProxy::unregisterDirectChannel(int32_t channelHandle) {
+ Result result;
+ if (mDirectChannelSubHal == nullptr) {
+ result = Result::INVALID_OPERATION;
+ } else {
+ result = mDirectChannelSubHal->unregisterDirectChannel(channelHandle);
+ }
+ return result;
}
-Return<void> HalProxy::configDirectReport(int32_t /* sensorHandle */, int32_t /* channelHandle */,
- RateLevel /* rate */, configDirectReport_cb _hidl_cb) {
- // TODO: During init, discover the first sub-HAL in the config that has sensors with direct
- // channel support, if any, and proxy the API call there.
- _hidl_cb(Result::INVALID_OPERATION, 0 /* reportToken */);
+Return<void> HalProxy::configDirectReport(int32_t sensorHandle, int32_t channelHandle,
+ RateLevel rate, configDirectReport_cb _hidl_cb) {
+ if (mDirectChannelSubHal == nullptr) {
+ _hidl_cb(Result::INVALID_OPERATION, -1 /* reportToken */);
+ } else {
+ mDirectChannelSubHal->configDirectReport(clearSubHalIndex(sensorHandle), channelHandle,
+ rate, _hidl_cb);
+ }
return Return<void>();
}
@@ -193,15 +222,45 @@
return Return<void>();
}
-Return<void> HalProxy::onDynamicSensorsConnected(
- const hidl_vec<SensorInfo>& /* dynamicSensorsAdded */, int32_t /* subHalIndex */) {
- // TODO: Map the SensorInfo to the global list and then invoke the framework's callback.
+Return<void> HalProxy::onDynamicSensorsConnected(const hidl_vec<SensorInfo>& dynamicSensorsAdded,
+ int32_t subHalIndex) {
+ std::vector<SensorInfo> sensors;
+ {
+ std::lock_guard<std::mutex> lock(mDynamicSensorsMutex);
+ for (SensorInfo sensor : dynamicSensorsAdded) {
+ if (!subHalIndexIsClear(sensor.sensorHandle)) {
+ ALOGE("Dynamic sensor added %s had sensorHandle with first byte not 0.",
+ sensor.name.c_str());
+ } else {
+ sensor.sensorHandle = setSubHalIndex(sensor.sensorHandle, subHalIndex);
+ mDynamicSensors[sensor.sensorHandle] = sensor;
+ sensors.push_back(sensor);
+ }
+ }
+ }
+ mDynamicSensorsCallback->onDynamicSensorsConnected(sensors);
return Return<void>();
}
Return<void> HalProxy::onDynamicSensorsDisconnected(
- const hidl_vec<int32_t>& /* dynamicSensorHandlesRemoved */, int32_t /* subHalIndex */) {
- // TODO: Unmap the SensorInfo from the global list and then invoke the framework's callback.
+ const hidl_vec<int32_t>& dynamicSensorHandlesRemoved, int32_t subHalIndex) {
+ // TODO: Block this call until all pending events are flushed from queue
+ std::vector<int32_t> sensorHandles;
+ {
+ std::lock_guard<std::mutex> lock(mDynamicSensorsMutex);
+ for (int32_t sensorHandle : dynamicSensorHandlesRemoved) {
+ if (!subHalIndexIsClear(sensorHandle)) {
+ ALOGE("Dynamic sensorHandle removed had first byte not 0.");
+ } else {
+ sensorHandle = setSubHalIndex(sensorHandle, subHalIndex);
+ if (mDynamicSensors.find(sensorHandle) != mDynamicSensors.end()) {
+ mDynamicSensors.erase(sensorHandle);
+ sensorHandles.push_back(sensorHandle);
+ }
+ }
+ }
+ }
+ mDynamicSensorsCallback->onDynamicSensorsDisconnected(sensorHandles);
return Return<void>();
}
@@ -239,18 +298,25 @@
}
}
+void HalProxy::initializeSubHalCallbacks() {
+ for (size_t subHalIndex = 0; subHalIndex < mSubHalList.size(); subHalIndex++) {
+ sp<IHalProxyCallback> callback = new HalProxyCallback(this, subHalIndex);
+ mSubHalCallbacks.push_back(callback);
+ }
+}
+
void HalProxy::initializeSensorList() {
for (size_t subHalIndex = 0; subHalIndex < mSubHalList.size(); subHalIndex++) {
ISensorsSubHal* subHal = mSubHalList[subHalIndex];
auto result = subHal->getSensorsList([&](const auto& list) {
for (SensorInfo sensor : list) {
- if ((sensor.sensorHandle & kSensorHandleSubHalIndexMask) != 0) {
+ if (!subHalIndexIsClear(sensor.sensorHandle)) {
ALOGE("SubHal sensorHandle's first byte was not 0");
} else {
ALOGV("Loaded sensor: %s", sensor.name.c_str());
sensor.sensorHandle |= (subHalIndex << 24);
setDirectChannelFlags(&sensor, subHal);
- mSensorList.push_back(sensor);
+ mSensors[sensor.sensorHandle] = sensor;
}
}
});
@@ -260,6 +326,160 @@
}
}
+void HalProxy::init() {
+ initializeSubHalCallbacks();
+ initializeSensorList();
+}
+
+void HalProxy::startPendingWritesThread(HalProxy* halProxy) {
+ halProxy->handlePendingWrites();
+}
+
+void HalProxy::handlePendingWrites() {
+ // TODO: Find a way to optimize locking strategy maybe using two mutexes instead of one.
+ std::unique_lock<std::mutex> lock(mEventQueueWriteMutex);
+ while (mThreadsRun.load()) {
+ mEventQueueWriteCV.wait(
+ lock, [&] { return !mPendingWriteEventsQueue.empty() || !mThreadsRun.load(); });
+ if (mThreadsRun.load()) {
+ std::vector<Event>& pendingWriteEvents = mPendingWriteEventsQueue.front().first;
+ size_t numWakeupEvents = mPendingWriteEventsQueue.front().second;
+ size_t eventQueueSize = mEventQueue->getQuantumCount();
+ size_t numToWrite = std::min(pendingWriteEvents.size(), eventQueueSize);
+ lock.unlock();
+ // TODO: Find a way to interrupt writeBlocking if the thread should exit
+ // so we don't have to wait for timeout on framework restarts.
+ if (!mEventQueue->writeBlocking(
+ pendingWriteEvents.data(), numToWrite,
+ static_cast<uint32_t>(EventQueueFlagBits::EVENTS_READ),
+ static_cast<uint32_t>(EventQueueFlagBits::READ_AND_PROCESS),
+ kPendingWriteTimeoutNs, mEventQueueFlag)) {
+ ALOGE("Dropping %zu events after blockingWrite failed.", numToWrite);
+ if (numWakeupEvents > 0) {
+ if (pendingWriteEvents.size() > eventQueueSize) {
+ decrementRefCountAndMaybeReleaseWakelock(
+ countNumWakeupEvents(pendingWriteEvents, eventQueueSize));
+ } else {
+ decrementRefCountAndMaybeReleaseWakelock(numWakeupEvents);
+ }
+ }
+ }
+ lock.lock();
+ if (pendingWriteEvents.size() > eventQueueSize) {
+ // TODO: Check if this erase operation is too inefficient. It will copy all the
+ // remaining events down to fill the gap at the front of the array after the erase.
+ pendingWriteEvents.erase(pendingWriteEvents.begin(),
+ pendingWriteEvents.begin() + eventQueueSize);
+ } else {
+ mPendingWriteEventsQueue.pop();
+ }
+ }
+ }
+}
+
+void HalProxy::startWakelockThread(HalProxy* halProxy) {
+ halProxy->handleWakelocks();
+}
+
+void HalProxy::handleWakelocks() {
+ std::unique_lock<std::recursive_mutex> lock(mWakelockMutex);
+ while (mThreadsRun.load()) {
+ mWakelockCV.wait(lock, [&] { return mWakelockRefCount > 0 || !mThreadsRun.load(); });
+ if (mThreadsRun.load()) {
+ int64_t timeLeft;
+ if (sharedWakelockDidTimeout(&timeLeft)) {
+ resetSharedWakelock();
+ } else {
+ uint32_t numWakeLocksProcessed;
+ lock.unlock();
+ bool success = mWakeLockQueue->readBlocking(
+ &numWakeLocksProcessed, 1, 0,
+ static_cast<uint32_t>(WakeLockQueueFlagBits::DATA_WRITTEN), timeLeft);
+ lock.lock();
+ if (success) {
+ decrementRefCountAndMaybeReleaseWakelock(
+ static_cast<size_t>(numWakeLocksProcessed));
+ }
+ }
+ }
+ }
+ resetSharedWakelock();
+}
+
+bool HalProxy::sharedWakelockDidTimeout(int64_t* timeLeft) {
+ bool didTimeout;
+ int64_t duration = getTimeNow() - mWakelockTimeoutStartTime;
+ if (duration > kWakelockTimeoutNs) {
+ didTimeout = true;
+ } else {
+ didTimeout = false;
+ *timeLeft = kWakelockTimeoutNs - duration;
+ }
+ return didTimeout;
+}
+
+void HalProxy::resetSharedWakelock() {
+ std::lock_guard<std::recursive_mutex> lockGuard(mWakelockMutex);
+ decrementRefCountAndMaybeReleaseWakelock(mWakelockRefCount);
+ mWakelockTimeoutResetTime = getTimeNow();
+}
+
+void HalProxy::postEventsToMessageQueue(const std::vector<Event>& events, size_t numWakeupEvents,
+ ScopedWakelock wakelock) {
+ size_t numToWrite = 0;
+ std::lock_guard<std::mutex> lock(mEventQueueWriteMutex);
+ if (wakelock.isLocked()) {
+ incrementRefCountAndMaybeAcquireWakelock(numWakeupEvents);
+ }
+ if (mPendingWriteEventsQueue.empty()) {
+ numToWrite = std::min(events.size(), mEventQueue->availableToWrite());
+ if (numToWrite > 0) {
+ if (mEventQueue->write(events.data(), numToWrite)) {
+ // TODO: While loop if mEventQueue->availableToWrite > 0 to possibly fit in more
+ // writes immediately
+ mEventQueueFlag->wake(static_cast<uint32_t>(EventQueueFlagBits::READ_AND_PROCESS));
+ } else {
+ numToWrite = 0;
+ }
+ }
+ }
+ if (numToWrite < events.size()) {
+ // TODO: Bound the mPendingWriteEventsQueue so that we do not trigger OOMs if framework
+ // stalls
+ std::vector<Event> eventsLeft(events.begin() + numToWrite, events.end());
+ mPendingWriteEventsQueue.push({eventsLeft, numWakeupEvents});
+ mEventQueueWriteCV.notify_one();
+ }
+}
+
+bool HalProxy::incrementRefCountAndMaybeAcquireWakelock(size_t delta,
+ int64_t* timeoutStart /* = nullptr */) {
+ if (!mThreadsRun.load()) return false;
+ std::lock_guard<std::recursive_mutex> lockGuard(mWakelockMutex);
+ if (mWakelockRefCount == 0) {
+ acquire_wake_lock(PARTIAL_WAKE_LOCK, kWakelockName);
+ mWakelockCV.notify_one();
+ }
+ mWakelockTimeoutStartTime = getTimeNow();
+ mWakelockRefCount += delta;
+ if (timeoutStart != nullptr) {
+ *timeoutStart = mWakelockTimeoutStartTime;
+ }
+ return true;
+}
+
+void HalProxy::decrementRefCountAndMaybeReleaseWakelock(size_t delta,
+ int64_t timeoutStart /* = -1 */) {
+ if (!mThreadsRun.load()) return;
+ std::lock_guard<std::recursive_mutex> lockGuard(mWakelockMutex);
+ if (timeoutStart == -1) timeoutStart = mWakelockTimeoutResetTime;
+ if (mWakelockRefCount == 0 || timeoutStart < mWakelockTimeoutResetTime) return;
+ mWakelockRefCount -= std::min(mWakelockRefCount, delta);
+ if (mWakelockRefCount == 0) {
+ release_wake_lock(kWakelockName);
+ }
+}
+
void HalProxy::setDirectChannelFlags(SensorInfo* sensorInfo, ISensorsSubHal* subHal) {
bool sensorSupportsDirectChannel =
(sensorInfo->flags & (V1_0::SensorFlagBits::MASK_DIRECT_REPORT |
@@ -278,10 +498,63 @@
return mSubHalList[static_cast<size_t>(sensorHandle >> 24)];
}
+size_t HalProxy::countNumWakeupEvents(const std::vector<Event>& events, size_t n) {
+ size_t numWakeupEvents = 0;
+ for (size_t i = 0; i < n; i++) {
+ int32_t sensorHandle = events[i].sensorHandle;
+ if (mSensors[sensorHandle].flags & static_cast<uint32_t>(V1_0::SensorFlagBits::WAKE_UP)) {
+ numWakeupEvents++;
+ }
+ }
+ return numWakeupEvents;
+}
+
uint32_t HalProxy::clearSubHalIndex(uint32_t sensorHandle) {
return sensorHandle & (~kSensorHandleSubHalIndexMask);
}
+bool HalProxy::subHalIndexIsClear(uint32_t sensorHandle) {
+ return (sensorHandle & kSensorHandleSubHalIndexMask) == 0;
+}
+
+void HalProxyCallback::postEvents(const std::vector<Event>& events, ScopedWakelock wakelock) {
+ if (events.empty() || !mHalProxy->areThreadsRunning()) return;
+ size_t numWakeupEvents;
+ std::vector<Event> processedEvents = processEvents(events, &numWakeupEvents);
+ if (numWakeupEvents > 0) {
+ ALOG_ASSERT(wakelock.isLocked(),
+ "Wakeup events posted while wakelock unlocked for subhal"
+ " w/ index %zu.",
+ mSubHalIndex);
+ } else {
+ ALOG_ASSERT(!wakelock.isLocked(),
+ "No Wakeup events posted but wakelock locked for subhal"
+ " w/ index %zu.",
+ mSubHalIndex);
+ }
+ mHalProxy->postEventsToMessageQueue(events, numWakeupEvents, std::move(wakelock));
+}
+
+ScopedWakelock HalProxyCallback::createScopedWakelock(bool lock) {
+ ScopedWakelock wakelock(mHalProxy, lock);
+ return wakelock;
+}
+
+std::vector<Event> HalProxyCallback::processEvents(const std::vector<Event>& events,
+ size_t* numWakeupEvents) const {
+ *numWakeupEvents = 0;
+ std::vector<Event> eventsOut;
+ for (Event event : events) {
+ event.sensorHandle = setSubHalIndex(event.sensorHandle, mSubHalIndex);
+ eventsOut.push_back(event);
+ const SensorInfo& sensor = mHalProxy->getSensorInfo(event.sensorHandle);
+ if ((sensor.flags & V1_0::SensorFlagBits::WAKE_UP) != 0) {
+ (*numWakeupEvents)++;
+ }
+ }
+ return eventsOut;
+}
+
} // namespace implementation
} // namespace V2_0
} // namespace sensors
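
HalProxy multiplexes several sub-HALs behind one ISensors instance by packing the sub-HAL index into the most significant byte of each sensor handle (setSubHalIndex, clearSubHalIndex, and subHalIndexIsClear above). A standalone sketch of that packing using the same 0xFF000000 mask; the main() driver is illustrative only.

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uint32_t kSubHalIndexMask = 0xFF000000;

// Place the sub-HAL index in the top byte; the low 24 bits stay sub-HAL local.
uint32_t setSubHalIndex(uint32_t sensorHandle, size_t subHalIndex) {
    return sensorHandle | (static_cast<uint32_t>(subHalIndex) << 24);
}

uint32_t clearSubHalIndex(uint32_t sensorHandle) {
    return sensorHandle & ~kSubHalIndexMask;
}

bool subHalIndexIsClear(uint32_t sensorHandle) {
    return (sensorHandle & kSubHalIndexMask) == 0;
}

int main() {
    uint32_t local = 0x000000AB;                  // Handle as reported by the sub-HAL.
    uint32_t global = setSubHalIndex(local, 2);   // Handle exposed to the framework.
    assert(global == 0x020000AB);
    assert(clearSubHalIndex(global) == local);
    assert(!subHalIndexIsClear(global) && subHalIndexIsClear(local));
    printf("global handle: 0x%08X, sub-HAL index: %u\n", global,
           static_cast<unsigned>(global >> 24));
    return 0;
}
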
diff --git a/sensors/2.0/multihal/ScopedWakelock.cpp b/sensors/2.0/multihal/ScopedWakelock.cpp
new file mode 100644
index 0000000..d85d4a7
--- /dev/null
+++ b/sensors/2.0/multihal/ScopedWakelock.cpp
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ScopedWakelock.h"
+
+namespace android {
+namespace hardware {
+namespace sensors {
+namespace V2_0 {
+namespace implementation {
+
+int64_t getTimeNow() {
+ return std::chrono::duration_cast<std::chrono::nanoseconds>(
+ std::chrono::system_clock::now().time_since_epoch())
+ .count();
+}
+
+ScopedWakelock::ScopedWakelock(IScopedWakelockRefCounter* refCounter, bool locked)
+ : mRefCounter(refCounter), mLocked(locked) {
+ if (mLocked) {
+ mLocked = mRefCounter->incrementRefCountAndMaybeAcquireWakelock(1, &mCreatedAtTimeNs);
+ }
+}
+
+ScopedWakelock::~ScopedWakelock() {
+ if (mLocked) {
+ mRefCounter->decrementRefCountAndMaybeReleaseWakelock(1, mCreatedAtTimeNs);
+ }
+}
+
+} // namespace implementation
+} // namespace V2_0
+} // namespace sensors
+} // namespace hardware
+} // namespace android
\ No newline at end of file
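
ScopedWakelock.cpp above ties wake-lock lifetime to object scope: the constructor bumps a shared ref count (acquiring the wake lock when the count leaves zero) and the destructor drops it again. A minimal sketch of that RAII ref-count pattern, with a hypothetical FakeRefCounter standing in for IScopedWakelockRefCounter; it only counts and never touches the kernel wake-lock APIs.

#include <cstddef>
#include <cstdio>

// Hypothetical counter; the real interface would acquire/release the wake lock here.
class FakeRefCounter {
  public:
    bool increment(size_t delta) {
        mCount += delta;
        return true;
    }
    void decrement(size_t delta) { mCount -= (delta > mCount) ? mCount : delta; }
    size_t count() const { return mCount; }

  private:
    size_t mCount = 0;
};

// RAII guard mirroring ScopedWakelock: bump the count while "locked", drop it on scope exit.
class ScopedGuard {
  public:
    ScopedGuard(FakeRefCounter* counter, bool locked) : mCounter(counter), mLocked(locked) {
        if (mLocked) {
            mLocked = mCounter->increment(1);
        }
    }
    ~ScopedGuard() {
        if (mLocked) {
            mCounter->decrement(1);
        }
    }
    bool isLocked() const { return mLocked; }

  private:
    FakeRefCounter* mCounter;
    bool mLocked;
};

int main() {
    FakeRefCounter counter;
    {
        ScopedGuard guard(&counter, /*locked=*/true);
        printf("inside scope, ref count = %zu\n", counter.count());  // Prints 1.
    }
    printf("after scope, ref count = %zu\n", counter.count());  // Prints 0.
    return 0;
}
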
diff --git a/sensors/2.0/multihal/android.hardware.sensors@2.0-service-multihal.rc b/sensors/2.0/multihal/android.hardware.sensors@2.0-service-multihal.rc
index 1671689..a4da3b0 100644
--- a/sensors/2.0/multihal/android.hardware.sensors@2.0-service-multihal.rc
+++ b/sensors/2.0/multihal/android.hardware.sensors@2.0-service-multihal.rc
@@ -1,6 +1,7 @@
service vendor.sensors-hal-2-0-multihal /vendor/bin/hw/android.hardware.sensors@2.0-service.multihal
class hal
user system
- group system
+ group system wakelock
+ writepid /dev/cpuset/system-background/tasks
capabilities BLOCK_SUSPEND
rlimit rtprio 10 10
diff --git a/sensors/2.0/multihal/include/HalProxy.h b/sensors/2.0/multihal/include/HalProxy.h
index 4ecb58b..6592afe 100644
--- a/sensors/2.0/multihal/include/HalProxy.h
+++ b/sensors/2.0/multihal/include/HalProxy.h
@@ -16,14 +16,24 @@
#pragma once
+#include "ScopedWakelock.h"
#include "SubHal.h"
#include <android/hardware/sensors/2.0/ISensors.h>
+#include <android/hardware/sensors/2.0/types.h>
#include <fmq/MessageQueue.h>
#include <hardware_legacy/power.h>
#include <hidl/MQDescriptor.h>
#include <hidl/Status.h>
+#include <atomic>
+#include <condition_variable>
+#include <map>
+#include <mutex>
+#include <queue>
+#include <thread>
+#include <utility>
+
namespace android {
namespace hardware {
namespace sensors {
@@ -39,13 +49,15 @@
using ::android::hardware::Return;
using ::android::hardware::Void;
-class HalProxy : public ISensors {
+class HalProxy : public ISensors, public IScopedWakelockRefCounter {
public:
using Event = ::android::hardware::sensors::V1_0::Event;
using OperationMode = ::android::hardware::sensors::V1_0::OperationMode;
using RateLevel = ::android::hardware::sensors::V1_0::RateLevel;
using Result = ::android::hardware::sensors::V1_0::Result;
+ using SensorInfo = ::android::hardware::sensors::V1_0::SensorInfo;
using SharedMemInfo = ::android::hardware::sensors::V1_0::SharedMemInfo;
+ using ISensorsSubHal = ::android::hardware::sensors::V2_0::implementation::ISensorsSubHal;
explicit HalProxy();
// Test only constructor.
@@ -91,6 +103,37 @@
Return<void> onDynamicSensorsDisconnected(const hidl_vec<int32_t>& dynamicSensorHandlesRemoved,
int32_t subHalIndex);
+ // Below methods are for HalProxyCallback
+
+ /**
+ * Post events to the event message queue if there is room to write them. Otherwise post the
+ * remaining events to a background thread for a blocking write with a kPendingWriteTimeoutNs
+ * timeout.
+ *
+ * @param events The list of events to post to the message queue.
+ * @param numWakeupEvents The number of wakeup events in events.
+ * @param wakelock The wakelock associated with this post of events.
+ */
+ void postEventsToMessageQueue(const std::vector<Event>& events, size_t numWakeupEvents,
+ ScopedWakelock wakelock);
+
+ /**
+ * Get the sensor info associated with that sensorHandle.
+ *
+ * @param sensorHandle The sensor handle.
+ *
+ * @return The sensor info object in the mapping.
+ */
+ const SensorInfo& getSensorInfo(uint32_t sensorHandle) { return mSensors[sensorHandle]; }
+
+ bool areThreadsRunning() { return mThreadsRun.load(); }
+
+ // Below methods are from IScopedWakelockRefCounter interface
+ bool incrementRefCountAndMaybeAcquireWakelock(size_t delta,
+ int64_t* timeoutStart = nullptr) override;
+
+ void decrementRefCountAndMaybeReleaseWakelock(size_t delta, int64_t timeoutStart = -1) override;
+
private:
using EventMessageQueue = MessageQueue<Event, kSynchronizedReadWrite>;
using WakeLockMessageQueue = MessageQueue<uint32_t, kSynchronizedReadWrite>;
@@ -120,14 +163,20 @@
*/
std::vector<ISensorsSubHal*> mSubHalList;
+ //! The list of subhal callbacks for each subhal where the indices correlate with mSubHalList
+ std::vector<const sp<IHalProxyCallback>> mSubHalCallbacks;
+
/**
- * List of SensorInfo objects that contains the sensor info from subhals as
+ * Map of sensor handles to SensorInfo objects that contains the sensor info from subhals as
* well as the modified sensor handle for the framework.
*
- * The subhal index is encoded in the first byte of the sensor handle and
- * the remaining bytes are generated by the subhal to identify the sensor.
+ * The subhal index is encoded in the first byte of the sensor handle and the remaining
+ * bytes are generated by the subhal to identify the sensor.
*/
- std::vector<SensorInfo> mSensorList;
+ std::map<uint32_t, SensorInfo> mSensors;
+
+ //! Map of the dynamic sensors that have been added to halproxy.
+ std::map<uint32_t, SensorInfo> mDynamicSensors;
//! The current operation mode for all subhals.
OperationMode mCurrentOperationMode = OperationMode::NORMAL;
@@ -135,22 +184,111 @@
//! The single subHal that supports directChannel reporting.
ISensorsSubHal* mDirectChannelSubHal = nullptr;
+ //! The timeout for each pending write on background thread for events.
+ static const int64_t kPendingWriteTimeoutNs = 5 * INT64_C(1000000000) /* 5 seconds */;
+
//! The bit mask used to get the subhal index from a sensor handle.
static constexpr uint32_t kSensorHandleSubHalIndexMask = 0xFF000000;
/**
+ * A FIFO queue of pairs of vector of events and the number of wakeup events in that vector
+ * which are waiting to be written to the events fmq in the background thread.
+ */
+ std::queue<std::pair<std::vector<Event>, size_t>> mPendingWriteEventsQueue;
+
+ //! The mutex protecting writing to the fmq and the pending events queue
+ std::mutex mEventQueueWriteMutex;
+
+ //! The condition variable waiting on pending write events to stack up
+ std::condition_variable mEventQueueWriteCV;
+
+ //! The thread object that handles pending writes
+ std::thread mPendingWritesThread;
+
+ //! The thread object that handles wakelocks
+ std::thread mWakelockThread;
+
+ //! The bool indicating whether to end the threads started in initialize
+ std::atomic_bool mThreadsRun = true;
+
+ //! The mutex protecting access to the dynamic sensors added and removed methods.
+ std::mutex mDynamicSensorsMutex;
+
+ // Wakelock ref count member variables below
+
+ //! The mutex protecting the wakelock refcount and subsequent wakelock releases and
+ //! acquisitions
+ std::recursive_mutex mWakelockMutex;
+
+ std::condition_variable_any mWakelockCV;
+
+ //! The refcount of how many ScopedWakelocks and pending wakeup events are active
+ size_t mWakelockRefCount = 0;
+
+ int64_t mWakelockTimeoutStartTime = getTimeNow();
+
+ int64_t mWakelockTimeoutResetTime = getTimeNow();
+
+ const char* kWakelockName = "SensorsMultiHal";
+
+ /**
* Initialize the list of SubHal objects in mSubHalList by reading from dynamic libraries
* listed in a config file.
*/
void initializeSubHalListFromConfigFile(const char* configFileName);
/**
+ * Initialize the HalProxyCallback vector using the list of subhals.
+ */
+ void initializeSubHalCallbacks();
+
+ /**
* Initialize the list of SensorInfo objects in mSensorList by getting sensors from each
* subhal.
*/
void initializeSensorList();
/**
+ * Calls the helper methods that all ctors use.
+ */
+ void init();
+
+ /**
+ * Starts the thread that handles pending writes to event fmq.
+ *
+ * @param halProxy The HalProxy object pointer.
+ */
+ static void startPendingWritesThread(HalProxy* halProxy);
+
+ //! Handles the pending writes on events to eventqueue.
+ void handlePendingWrites();
+
+ /**
+ * Starts the thread that handles decrementing the ref count on wakeup events processed by the
+ * framework and timing out wakelocks.
+ *
+ * @param halProxy The HalProxy object pointer.
+ */
+ static void startWakelockThread(HalProxy* halProxy);
+
+ //! Handles the wakelocks.
+ void handleWakelocks();
+
+ /**
+ * @param timeLeft The variable that is set to the time left before the timeout occurs, or left
+ * unmodified if the timeout has already occurred.
+ *
+ * @return true if the shared wakelock has been held past the timeout and should be released
+ */
+ bool sharedWakelockDidTimeout(int64_t* timeLeft);
+
+ /**
+ * Reset all the member variables associated with the wakelock ref count and maybe release
+ * the shared wakelock.
+ */
+ void resetSharedWakelock();
+
+ /**
* Clear direct channel flags if the HalProxy has already chosen a subhal as its direct channel
* subhal. Set the directChannelSubHal pointer to the subHal passed in if this is the first
* direct channel enabled sensor seen.
@@ -169,6 +307,16 @@
*/
ISensorsSubHal* getSubHalForSensorHandle(uint32_t sensorHandle);
+ /**
+ * Count the number of wakeup events in the first n events of the vector.
+ *
+ * @param events The vector of Event objects.
+ * @param n The end index not inclusive of events to consider.
+ *
+ * @return The number of wakeup events of the considered events.
+ */
+ size_t countNumWakeupEvents(const std::vector<Event>& events, size_t n);
+
/*
* Clear out the subhal index bytes from a sensorHandle.
*
@@ -177,6 +325,45 @@
* @return The modified version of the sensor handle.
*/
static uint32_t clearSubHalIndex(uint32_t sensorHandle);
+
+ /**
+ * @param sensorHandle The sensor handle to modify.
+ *
+ * @return true if subHalIndex byte of sensorHandle is zeroed.
+ */
+ static bool subHalIndexIsClear(uint32_t sensorHandle);
+};
+
+/**
+ * Callback class used to provide the HalProxy with the index of which subHal is invoking
+ */
+class HalProxyCallback : public IHalProxyCallback {
+ using SensorInfo = ::android::hardware::sensors::V1_0::SensorInfo;
+
+ public:
+ HalProxyCallback(HalProxy* halProxy, int32_t subHalIndex)
+ : mHalProxy(halProxy), mSubHalIndex(subHalIndex) {}
+
+ Return<void> onDynamicSensorsConnected(
+ const hidl_vec<SensorInfo>& dynamicSensorsAdded) override {
+ return mHalProxy->onDynamicSensorsConnected(dynamicSensorsAdded, mSubHalIndex);
+ }
+
+ Return<void> onDynamicSensorsDisconnected(
+ const hidl_vec<int32_t>& dynamicSensorHandlesRemoved) override {
+ return mHalProxy->onDynamicSensorsDisconnected(dynamicSensorHandlesRemoved, mSubHalIndex);
+ }
+
+ void postEvents(const std::vector<Event>& events, ScopedWakelock wakelock);
+
+ ScopedWakelock createScopedWakelock(bool lock);
+
+ private:
+ HalProxy* mHalProxy;
+ int32_t mSubHalIndex;
+
+ std::vector<Event> processEvents(const std::vector<Event>& events,
+ size_t* numWakeupEvents) const;
};
} // namespace implementation
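
The new HalProxy members above (mPendingWriteEventsQueue, mEventQueueWriteMutex, mEventQueueWriteCV, mPendingWritesThread, mThreadsRun) implement a background writer: event batches that do not fit in the FMQ are queued and flushed by a worker thread that waits on a condition variable and is joined on destruction. A simplified, self-contained sketch of that pattern, with the payload reduced to std::vector<int> and the FMQ write replaced by a printf.

#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>
#include <vector>

class PendingWriter {
  public:
    PendingWriter() : mWorker([this] { run(); }) {}

    ~PendingWriter() {
        {
            // Flip the flag under the lock so the waiting predicate cannot miss it.
            std::lock_guard<std::mutex> lock(mMutex);
            mRunning.store(false);
        }
        mCv.notify_one();
        if (mWorker.joinable()) mWorker.join();
    }

    void post(std::vector<int> batch) {
        {
            std::lock_guard<std::mutex> lock(mMutex);
            mPending.push(std::move(batch));
        }
        mCv.notify_one();
    }

  private:
    void run() {
        std::unique_lock<std::mutex> lock(mMutex);
        while (mRunning.load()) {
            mCv.wait(lock, [this] { return !mPending.empty() || !mRunning.load(); });
            while (!mPending.empty()) {
                std::vector<int> batch = std::move(mPending.front());
                mPending.pop();
                lock.unlock();  // Do the slow "write" outside the lock, as HalProxy does.
                printf("flushed batch of %zu events\n", batch.size());
                lock.lock();
            }
        }
    }

    std::mutex mMutex;
    std::condition_variable mCv;
    std::queue<std::vector<int>> mPending;
    std::atomic_bool mRunning{true};
    std::thread mWorker;  // Declared last so it starts after the other members exist.
};

int main() {
    PendingWriter writer;
    writer.post({1, 2, 3});
    writer.post({4, 5});
    return 0;  // Destructor stops and joins the worker.
}
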
diff --git a/sensors/2.0/multihal/include/ScopedWakelock.h b/sensors/2.0/multihal/include/ScopedWakelock.h
new file mode 100644
index 0000000..aa6d9db
--- /dev/null
+++ b/sensors/2.0/multihal/include/ScopedWakelock.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/hardware/sensors/2.0/types.h>
+
+namespace android {
+namespace hardware {
+namespace sensors {
+namespace V2_0 {
+namespace implementation {
+
+using ::android::hardware::sensors::V2_0::SensorTimeout;
+
+const int64_t kWakelockTimeoutNs =
+ static_cast<int64_t>(SensorTimeout::WAKE_LOCK_SECONDS) * INT64_C(1000000000);
+
+int64_t getTimeNow();
+
+class IScopedWakelockRefCounter : public RefBase {
+ public:
+ /**
+ * Increment the wakelock ref count and maybe acquire the shared wakelock if incrementing
+ * from 0, then return the time of the increment back to the caller.
+ *
+ * @param delta The amount to change ref count by.
+ * @param timeoutStart Pointer to a timestamp in ns that is set to the time of the increment, or
+ * nullptr if the caller does not need it.
+ *
+ * @return true if successfully incremented the wakelock ref count.
+ */
+ virtual bool incrementRefCountAndMaybeAcquireWakelock(size_t delta,
+ int64_t* timeoutStart = nullptr) = 0;
+ /**
+ * Decrement the wakelock ref count and maybe release wakelock if ref count ends up 0.
+ *
+ * @param delta The amount to change ref count by.
+ * @param timeoutStart The timestamp in ns that the calling context kept track of when
+ * incrementing the ref count or -1 by default
+ */
+ virtual void decrementRefCountAndMaybeReleaseWakelock(size_t delta,
+ int64_t timeoutStart = -1) = 0;
+ // Virtual dtor needed for compilation success
+ virtual ~IScopedWakelockRefCounter(){};
+};
+
+/**
+ * Wrapper around wake lock acquisition functions (acquire/release_wake_lock) that provides a
+ * RAII-style mechanism for keeping a wake lock held for the duration of a scoped block.
+ * When a ScopedWakelock is created, it increments the reference count stored in the HalProxy
+ * for the sub-HALs specific wake lock, acquiring the wake lock if necessary. When the object goes
+ * out of scope, the ref count is decremented, potentially releasing the wake lock if no other
+ * references to the wake lock exist.
+ *
+ * This class is allocated through the createScopedWakelock callback inside the IHalProxyCallback
+ * provided to sub-HALs during initialization and should be used for all wake lock acquisition
+ * inside of the sub-HAL to ensure wake locks are not held indefinitely.
+ *
+ * The most prevalent use case for this class will be for posting events to the framework through
+ * the postEvents HalProxy callback. The expectation is that sub-HALs will create this
+ * ScopedWakelock through the createScopedWakelock upon receiving a sensor events. The lock boolean
+ * provided to createScopedWakelock will be set the according to whether the sensor events are
+ * from wakeup sensors. Then, the sub-HAL will perform any processing necessary before invoking the
+ * postEvents callback passing in the previously created ScopedWakelock. At this point, ownership
+ * of the object will be passed to the HalProxy that will then be responsible for ensuring any
+ * wake locks continue to be held, if necessary.
+ */
+class ScopedWakelock {
+ public:
+ ScopedWakelock(ScopedWakelock&&) = default;
+ ScopedWakelock& operator=(ScopedWakelock&&) = default;
+ virtual ~ScopedWakelock();
+
+ bool isLocked() const { return mLocked; }
+
+ private:
+ friend class HalProxyCallback;
+ IScopedWakelockRefCounter* mRefCounter;
+ int64_t mCreatedAtTimeNs;
+ bool mLocked;
+ ScopedWakelock(IScopedWakelockRefCounter* refCounter, bool locked);
+ ScopedWakelock(const ScopedWakelock&) = delete;
+ ScopedWakelock& operator=(const ScopedWakelock&) = delete;
+};
+
+} // namespace implementation
+} // namespace V2_0
+} // namespace sensors
+} // namespace hardware
+} // namespace android
\ No newline at end of file
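
The class comment above describes the intended sub-HAL flow: create a ScopedWakelock via createScopedWakelock, locked only when wakeup events are present, then hand ownership to postEvents. A hedged sketch of that flow using hypothetical FakeEvent, FakeWakelock, and FakeCallback stand-ins rather than the real HIDL types; only the two callback method names come from this change, the signatures here are illustrative.

#include <cstdio>
#include <utility>
#include <vector>

// Hypothetical event type: only the wakeup flag matters for this sketch.
struct FakeEvent {
    bool isWakeup;
};

// Hypothetical stand-in for ScopedWakelock: movable, not copyable.
class FakeWakelock {
  public:
    explicit FakeWakelock(bool locked) : mLocked(locked) {}
    FakeWakelock(FakeWakelock&& other) : mLocked(other.mLocked) { other.mLocked = false; }
    bool isLocked() const { return mLocked; }

  private:
    bool mLocked;
};

// Hypothetical stand-in for IHalProxyCallback, with the two methods named in this change.
struct FakeCallback {
    FakeWakelock createScopedWakelock(bool lock) { return FakeWakelock(lock); }
    void postEvents(const std::vector<FakeEvent>& events, FakeWakelock wakelock) {
        // The real HalProxy takes ownership of the wakelock here and keeps it held until
        // the framework acknowledges the wakeup events.
        printf("posted %zu events, wakelock locked: %d\n", events.size(), wakelock.isLocked());
    }
};

// The flow the class comment describes: lock only when wakeup events are being delivered.
void onSensorEvents(FakeCallback& callback, const std::vector<FakeEvent>& events) {
    bool hasWakeupEvents = false;
    for (const FakeEvent& event : events) {
        if (event.isWakeup) {
            hasWakeupEvents = true;
        }
    }
    FakeWakelock wakelock = callback.createScopedWakelock(hasWakeupEvents);
    callback.postEvents(events, std::move(wakelock));
}

int main() {
    FakeCallback callback;
    onSensorEvents(callback, {{/*isWakeup=*/true}, {/*isWakeup=*/false}});
    return 0;
}
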
diff --git a/sensors/2.0/multihal/include/SubHal.h b/sensors/2.0/multihal/include/SubHal.h
index e84cba5..e7eedaa 100644
--- a/sensors/2.0/multihal/include/SubHal.h
+++ b/sensors/2.0/multihal/include/SubHal.h
@@ -16,15 +16,13 @@
#pragma once
+#include "ScopedWakelock.h"
+
#include <android/hardware/sensors/1.0/types.h>
#include <android/hardware/sensors/2.0/ISensors.h>
#include <vector>
-using ::android::hardware::sensors::V1_0::Event;
-using ::android::hardware::sensors::V1_0::Result;
-using ::android::hardware::sensors::V1_0::SensorInfo;
-
// Indicates the current version of the multiHAL interface formatted as (HAL major version) << 24 |
// (HAL minor version) << 16 | (multiHAL version)
#define SUB_HAL_2_0_VERSION 0x02000000
@@ -35,44 +33,9 @@
namespace V2_0 {
namespace implementation {
-/**
- * Wrapper around wake lock acquisition functions (acquire/release_wake_lock) that provides a
- * RAII-style mechanism for keeping a wake lock held for the duration of a scoped block.
- * When a ScopedWakelock is created, it increments the reference count stored in the HalProxy
- * for the sub-HALs specific wake lock, acquiring the wake lock if necessary. When the object goes
- * out of scope, the ref count is decremented, potentially releasing the wake lock if no other
- * references to the wake lock exist.
- *
- * This class is allocated through the createScopedWakelock callback inside the IHalProxyCallback
- * provided to sub-HALs during initialization and should be used for all wake lock acquisition
- * inside of the sub-HAL to ensure wake locks are not held indefinitely.
- *
- * The most prevalent use case for this class will be for posting events to the framework through
- * the postEvents HalProxy callback. The expectation is that sub-HALs will create this
- * ScopedWakelock through the createScopedWakelock upon receiving a sensor events. The lock boolean
- * provided to createScopedWakelock will be set the according to whether the sensor events are
- * from wakeup sensors. Then, the sub-HAL will perform any processing necessary before invoking the
- * postEvents callback passing in the previously created ScopedWakelock. At this point, ownership
- * of the object will be passed to the HalProxy that will then be responsible for ensuring any
- * wake locks continue to be held, if necessary.
- */
-class ScopedWakelock {
- public:
- ScopedWakelock(ScopedWakelock&&) = default;
- ScopedWakelock& operator=(ScopedWakelock&&) = default;
- virtual ~ScopedWakelock() { mLocked = false; };
-
- bool isLocked() const { return mLocked; }
-
- protected:
- bool mLocked;
-
- private:
- // TODO: Mark HalProxy's subclass of ScopedWakelock as a friend so that it can be initialized.
- ScopedWakelock();
- ScopedWakelock(const ScopedWakelock&) = delete;
- ScopedWakelock& operator=(const ScopedWakelock&) = delete;
-};
+using ::android::hardware::sensors::V1_0::Event;
+using ::android::hardware::sensors::V1_0::Result;
+using ::android::hardware::sensors::V1_0::SensorInfo;
/**
* Interface that contains several callbacks into the HalProxy class to communicate dynamic sensor
@@ -169,7 +132,9 @@
/**
* First method invoked on the sub-HAL after it's allocated through sensorsHalGetSubHal() by the
* HalProxy. Sub-HALs should use this to initialize any state and retain the callback given in
- * order to communicate with the HalProxy.
+ * order to communicate with the HalProxy. This method will be called any time the sensors
+ * framework restarts. Therefore, this method is responsible for resetting the state of the
+ * subhal and cleaning up and reallocating any previously allocated data.
*
* @param halProxyCallback callback used to inform the HalProxy when a dynamic sensor's state
* changes, new sensor events should be sent to the framework, and when a new ScopedWakelock
diff --git a/sensors/2.0/multihal/tests/Android.bp b/sensors/2.0/multihal/tests/Android.bp
index 21ceb0c..e3a4af1 100644
--- a/sensors/2.0/multihal/tests/Android.bp
+++ b/sensors/2.0/multihal/tests/Android.bp
@@ -28,11 +28,16 @@
"libcutils",
"libfmq",
"libhidlbase",
- "libhidltransport",
"liblog",
"libpower",
"libutils",
],
+ static_libs: [
+ "android.hardware.sensors@2.0-HalProxy",
+ ],
+ cflags: [
+ "-DLOG_TAG=\"FakeSubHal\""
+ ],
}
cc_library {
@@ -80,10 +85,12 @@
"libcutils",
"libfmq",
"libhidlbase",
- "libhidltransport",
"liblog",
"libpower",
"libutils",
],
test_suites: ["device-tests"],
+ cflags: [
+ "-DLOG_TAG=\"HalProxyUnitTests\"",
+ ],
}
diff --git a/sensors/2.0/multihal/tests/HalProxy_test.cpp b/sensors/2.0/multihal/tests/HalProxy_test.cpp
index 1e1f9e9..fa527c9 100644
--- a/sensors/2.0/multihal/tests/HalProxy_test.cpp
+++ b/sensors/2.0/multihal/tests/HalProxy_test.cpp
@@ -16,18 +16,33 @@
#include <gtest/gtest.h>
#include <android/hardware/sensors/2.0/types.h>
+#include <fmq/MessageQueue.h>
#include "HalProxy.h"
+#include "ScopedWakelock.h"
#include "SensorsSubHal.h"
+#include <chrono>
+#include <set>
+#include <thread>
#include <vector>
namespace {
+using ::android::hardware::EventFlag;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::MessageQueue;
+using ::android::hardware::Return;
+using ::android::hardware::sensors::V1_0::EventPayload;
using ::android::hardware::sensors::V1_0::SensorFlagBits;
using ::android::hardware::sensors::V1_0::SensorInfo;
using ::android::hardware::sensors::V1_0::SensorType;
+using ::android::hardware::sensors::V2_0::EventQueueFlagBits;
+using ::android::hardware::sensors::V2_0::ISensorsCallback;
+using ::android::hardware::sensors::V2_0::WakeLockQueueFlagBits;
using ::android::hardware::sensors::V2_0::implementation::HalProxy;
+using ::android::hardware::sensors::V2_0::implementation::HalProxyCallback;
+using ::android::hardware::sensors::V2_0::subhal::implementation::AddAndRemoveDynamicSensorsSubHal;
using ::android::hardware::sensors::V2_0::subhal::implementation::AllSensorsSubHal;
using ::android::hardware::sensors::V2_0::subhal::implementation::
AllSupportDirectChannelSensorsSubHal;
@@ -36,10 +51,56 @@
DoesNotSupportDirectChannelSensorsSubHal;
using ::android::hardware::sensors::V2_0::subhal::implementation::OnChangeSensorsSubHal;
using ::android::hardware::sensors::V2_0::subhal::implementation::SensorsSubHal;
-
using ::android::hardware::sensors::V2_0::subhal::implementation::
SetOperationModeFailingSensorsSubHal;
+using EventMessageQueue = MessageQueue<Event, ::android::hardware::kSynchronizedReadWrite>;
+using WakeupMessageQueue = MessageQueue<uint32_t, ::android::hardware::kSynchronizedReadWrite>;
+
+// The barebones sensors callback class passed into halproxy initialize calls
+class SensorsCallback : public ISensorsCallback {
+ public:
+ Return<void> onDynamicSensorsConnected(
+ const hidl_vec<SensorInfo>& /*dynamicSensorsAdded*/) override {
+ // Nothing yet
+ return Return<void>();
+ }
+
+ Return<void> onDynamicSensorsDisconnected(
+ const hidl_vec<int32_t>& /*dynamicSensorHandlesRemoved*/) override {
+ // Nothing yet
+ return Return<void>();
+ }
+};
+
+// The sensors callback that expects a variable list of sensors to be added
+class TestSensorsCallback : public ISensorsCallback {
+ public:
+ Return<void> onDynamicSensorsConnected(
+ const hidl_vec<SensorInfo>& dynamicSensorsAdded) override {
+ mSensorsConnected.insert(mSensorsConnected.end(), dynamicSensorsAdded.begin(),
+ dynamicSensorsAdded.end());
+ return Return<void>();
+ }
+
+ Return<void> onDynamicSensorsDisconnected(
+ const hidl_vec<int32_t>& dynamicSensorHandlesRemoved) override {
+ mSensorHandlesDisconnected.insert(mSensorHandlesDisconnected.end(),
+ dynamicSensorHandlesRemoved.begin(),
+ dynamicSensorHandlesRemoved.end());
+ return Return<void>();
+ }
+
+ const std::vector<SensorInfo>& getSensorsConnected() const { return mSensorsConnected; }
+ const std::vector<int32_t>& getSensorHandlesDisconnected() const {
+ return mSensorHandlesDisconnected;
+ }
+
+ private:
+ std::vector<SensorInfo> mSensorsConnected;
+ std::vector<int32_t> mSensorHandlesDisconnected;
+};
+
// Helper declarations follow
/**
@@ -65,6 +126,66 @@
void testSensorsListForOneDirectChannelEnabledSubHal(const std::vector<SensorInfo>& sensorsList,
size_t enabledSubHalIndex);
+void ackWakeupEventsToHalProxy(size_t numEvents, std::unique_ptr<WakeupMessageQueue>& wakelockQueue,
+ EventFlag* wakelockQueueFlag);
+
+bool readEventsOutOfQueue(size_t numEvents, std::unique_ptr<EventMessageQueue>& eventQueue,
+ EventFlag* eventQueueFlag);
+
+std::unique_ptr<EventMessageQueue> makeEventFMQ(size_t size);
+
+std::unique_ptr<WakeupMessageQueue> makeWakelockFMQ(size_t size);
+
+/**
+ * Construct and return a HIDL Event whose sensorHandle refers to a proximity sensor
+ * which is a wakeup type sensor.
+ *
+ * @return A proximity event.
+ */
+Event makeProximityEvent();
+
+/**
+ * Construct and return a HIDL Event whose sensorHandle refers to an accelerometer sensor,
+ * which is a non-wakeup type sensor.
+ *
+ * @return An accelerometer event.
+ */
+Event makeAccelerometerEvent();
+
+/**
+ * Make a certain number of proximity type events with the sensorHandle field set to
+ * the proper number for AllSensorsSubHal subhal type.
+ *
+ * @param numEvents The number of events to make.
+ *
+ * @return The created list of events.
+ */
+std::vector<Event> makeMultipleProximityEvents(size_t numEvents);
+
+/**
+ * Make a certain number of accelerometer type events with the sensorHandle field set to
+ * the proper number for AllSensorsSubHal subhal type.
+ *
+ * @param numEvents The number of events to make.
+ *
+ * @return The created list of events.
+ */
+std::vector<Event> makeMultipleAccelerometerEvents(size_t numEvents);
+
+/**
+ * Given a SensorInfo vector and a sensor handles vector populate 'sensors' with SensorInfo
+ * objects that have the sensorHandle property set to int32_ts from start to start + size
+ * (exclusive) and push those sensorHandles also onto 'sensorHandles'.
+ *
+ * @param start The starting sensorHandle value.
+ * @param size The number of SensorInfo objects and sensor handles to create.
+ * @param sensors The SensorInfo object vector reference to push_back to.
+ * @param sensorHandles The sensor handles int32_t vector reference to push_back to.
+ */
+void makeSensorsAndSensorHandlesStartingAndOfSize(int32_t start, size_t size,
+ std::vector<SensorInfo>& sensors,
+ std::vector<int32_t>& sensorHandles);
+
// Tests follow
TEST(HalProxyTest, GetSensorsListOneSubHalTest) {
AllSensorsSubHal subHal;
@@ -156,6 +277,260 @@
});
}
+TEST(HalProxyTest, PostSingleNonWakeupEvent) {
+ constexpr size_t kQueueSize = 5;
+ AllSensorsSubHal subHal;
+ std::vector<ISensorsSubHal*> subHals{&subHal};
+ HalProxy proxy(subHals);
+ std::unique_ptr<EventMessageQueue> eventQueue = makeEventFMQ(kQueueSize);
+ std::unique_ptr<WakeupMessageQueue> wakeLockQueue = makeWakelockFMQ(kQueueSize);
+ ::android::sp<ISensorsCallback> callback = new SensorsCallback();
+ proxy.initialize(*eventQueue->getDesc(), *wakeLockQueue->getDesc(), callback);
+
+ std::vector<Event> events{makeAccelerometerEvent()};
+ subHal.postEvents(events, false /* wakeup */);
+
+ EXPECT_EQ(eventQueue->availableToRead(), 1);
+}
+
+TEST(HalProxyTest, PostMultipleNonWakeupEvent) {
+ constexpr size_t kQueueSize = 5;
+ constexpr size_t kNumEvents = 3;
+ AllSensorsSubHal subHal;
+ std::vector<ISensorsSubHal*> subHals{&subHal};
+ HalProxy proxy(subHals);
+ std::unique_ptr<EventMessageQueue> eventQueue = makeEventFMQ(kQueueSize);
+ std::unique_ptr<WakeupMessageQueue> wakeLockQueue = makeWakelockFMQ(kQueueSize);
+ ::android::sp<ISensorsCallback> callback = new SensorsCallback();
+ proxy.initialize(*eventQueue->getDesc(), *wakeLockQueue->getDesc(), callback);
+
+ std::vector<Event> events = makeMultipleAccelerometerEvents(kNumEvents);
+ subHal.postEvents(events, false /* wakeup */);
+
+ EXPECT_EQ(eventQueue->availableToRead(), kNumEvents);
+}
+
+TEST(HalProxyTest, PostSingleWakeupEvent) {
+ constexpr size_t kQueueSize = 5;
+ AllSensorsSubHal subHal;
+ std::vector<ISensorsSubHal*> subHals{&subHal};
+ HalProxy proxy(subHals);
+ std::unique_ptr<EventMessageQueue> eventQueue = makeEventFMQ(kQueueSize);
+ std::unique_ptr<WakeupMessageQueue> wakeLockQueue = makeWakelockFMQ(kQueueSize);
+ ::android::sp<ISensorsCallback> callback = new SensorsCallback();
+ proxy.initialize(*eventQueue->getDesc(), *wakeLockQueue->getDesc(), callback);
+
+ EventFlag* eventQueueFlag;
+ EventFlag::createEventFlag(eventQueue->getEventFlagWord(), &eventQueueFlag);
+
+ EventFlag* wakelockQueueFlag;
+ EventFlag::createEventFlag(wakeLockQueue->getEventFlagWord(), &wakelockQueueFlag);
+
+ std::vector<Event> events{makeProximityEvent()};
+ subHal.postEvents(events, true /* wakeup */);
+
+ EXPECT_EQ(eventQueue->availableToRead(), 1);
+
+ readEventsOutOfQueue(1, eventQueue, eventQueueFlag);
+ ackWakeupEventsToHalProxy(1, wakeLockQueue, wakelockQueueFlag);
+}
+
+TEST(HalProxyTest, PostMultipleWakeupEvents) {
+ constexpr size_t kQueueSize = 5;
+ constexpr size_t kNumEvents = 3;
+ AllSensorsSubHal subHal;
+ std::vector<ISensorsSubHal*> subHals{&subHal};
+ HalProxy proxy(subHals);
+ std::unique_ptr<EventMessageQueue> eventQueue = makeEventFMQ(kQueueSize);
+ std::unique_ptr<WakeupMessageQueue> wakeLockQueue = makeWakelockFMQ(kQueueSize);
+ ::android::sp<ISensorsCallback> callback = new SensorsCallback();
+ proxy.initialize(*eventQueue->getDesc(), *wakeLockQueue->getDesc(), callback);
+
+ EventFlag* eventQueueFlag;
+ EventFlag::createEventFlag(eventQueue->getEventFlagWord(), &eventQueueFlag);
+
+ EventFlag* wakelockQueueFlag;
+ EventFlag::createEventFlag(wakeLockQueue->getEventFlagWord(), &wakelockQueueFlag);
+
+ std::vector<Event> events = makeMultipleProximityEvents(kNumEvents);
+ subHal.postEvents(events, true /* wakeup */);
+
+ EXPECT_EQ(eventQueue->availableToRead(), kNumEvents);
+
+ readEventsOutOfQueue(kNumEvents, eventQueue, eventQueueFlag);
+ ackWakeupEventsToHalProxy(kNumEvents, wakeLockQueue, wakelockQueueFlag);
+}
+
+TEST(HalProxyTest, PostEventsMultipleSubhals) {
+ constexpr size_t kQueueSize = 5;
+ constexpr size_t kNumEvents = 2;
+ AllSensorsSubHal subHal1, subHal2;
+ std::vector<ISensorsSubHal*> subHals{&subHal1, &subHal2};
+ HalProxy proxy(subHals);
+ std::unique_ptr<EventMessageQueue> eventQueue = makeEventFMQ(kQueueSize);
+ std::unique_ptr<WakeupMessageQueue> wakeLockQueue = makeWakelockFMQ(kQueueSize);
+ ::android::sp<ISensorsCallback> callback = new SensorsCallback();
+ proxy.initialize(*eventQueue->getDesc(), *wakeLockQueue->getDesc(), callback);
+
+ std::vector<Event> events = makeMultipleAccelerometerEvents(kNumEvents);
+ subHal1.postEvents(events, false /* wakeup */);
+
+ EXPECT_EQ(eventQueue->availableToRead(), kNumEvents);
+
+ subHal2.postEvents(events, false /* wakeup */);
+
+ EXPECT_EQ(eventQueue->availableToRead(), kNumEvents * 2);
+}
+
+TEST(HalProxyTest, PostEventsDelayedWrite) {
+ constexpr size_t kQueueSize = 5;
+ constexpr size_t kNumEvents = 6;
+ AllSensorsSubHal subHal1, subHal2;
+ std::vector<ISensorsSubHal*> subHals{&subHal1, &subHal2};
+ HalProxy proxy(subHals);
+ std::unique_ptr<EventMessageQueue> eventQueue = makeEventFMQ(kQueueSize);
+ std::unique_ptr<WakeupMessageQueue> wakeLockQueue = makeWakelockFMQ(kQueueSize);
+ ::android::sp<ISensorsCallback> callback = new SensorsCallback();
+ proxy.initialize(*eventQueue->getDesc(), *wakeLockQueue->getDesc(), callback);
+
+ EventFlag* eventQueueFlag;
+ EventFlag::createEventFlag(eventQueue->getEventFlagWord(), &eventQueueFlag);
+
+ std::vector<Event> events = makeMultipleAccelerometerEvents(kNumEvents);
+ subHal1.postEvents(events, false /* wakeup */);
+
+ EXPECT_EQ(eventQueue->availableToRead(), kQueueSize);
+
+    // Blocking-read a full queue size worth of events out of the queue, with a half-second timeout
+ EXPECT_TRUE(readEventsOutOfQueue(kQueueSize, eventQueue, eventQueueFlag));
+
+    // The proxy background thread should have written the remaining events when it saw space
+ EXPECT_TRUE(readEventsOutOfQueue(kNumEvents - kQueueSize, eventQueue, eventQueueFlag));
+
+ EXPECT_EQ(eventQueue->availableToRead(), 0);
+}
+
+TEST(HalProxyTest, PostEventsMultipleSubhalsThreaded) {
+ constexpr size_t kQueueSize = 5;
+ constexpr size_t kNumEvents = 2;
+ AllSensorsSubHal subHal1, subHal2;
+ std::vector<ISensorsSubHal*> subHals{&subHal1, &subHal2};
+ HalProxy proxy(subHals);
+ std::unique_ptr<EventMessageQueue> eventQueue = makeEventFMQ(kQueueSize);
+ std::unique_ptr<WakeupMessageQueue> wakeLockQueue = makeWakelockFMQ(kQueueSize);
+ ::android::sp<ISensorsCallback> callback = new SensorsCallback();
+ proxy.initialize(*eventQueue->getDesc(), *wakeLockQueue->getDesc(), callback);
+
+ std::vector<Event> events = makeMultipleAccelerometerEvents(kNumEvents);
+
+ std::thread t1(&AllSensorsSubHal::postEvents, &subHal1, events, false);
+ std::thread t2(&AllSensorsSubHal::postEvents, &subHal2, events, false);
+
+ t1.join();
+ t2.join();
+
+ EXPECT_EQ(eventQueue->availableToRead(), kNumEvents * 2);
+}
+
+TEST(HalProxyTest, DestructingWithEventsPendingOnBackgroundThreadTest) {
+ constexpr size_t kQueueSize = 5;
+ constexpr size_t kNumEvents = 6;
+ AllSensorsSubHal subHal;
+ std::vector<ISensorsSubHal*> subHals{&subHal};
+
+ std::unique_ptr<EventMessageQueue> eventQueue = makeEventFMQ(kQueueSize);
+ std::unique_ptr<WakeupMessageQueue> wakeLockQueue = makeWakelockFMQ(kQueueSize);
+ ::android::sp<ISensorsCallback> callback = new SensorsCallback();
+ HalProxy proxy(subHals);
+ proxy.initialize(*eventQueue->getDesc(), *wakeLockQueue->getDesc(), callback);
+
+ std::vector<Event> events = makeMultipleAccelerometerEvents(kNumEvents);
+ subHal.postEvents(events, false /* wakeup */);
+
+    // Sleep for half a second so that the background thread has time to attempt its blocking write
+ std::this_thread::sleep_for(std::chrono::milliseconds(500));
+
+    // We should see a 5 second wait here for the blocking write to time out
+
+    // There should be one event left on the pending writes queue here, and the proxy will then destruct.
+    // If this test completes it was a success; if it hangs we will see a crash.
+}
+
+TEST(HalProxyTest, DynamicSensorsConnectedTest) {
+ constexpr size_t kNumSensors = 3;
+ AddAndRemoveDynamicSensorsSubHal subHal;
+ std::vector<ISensorsSubHal*> subHals{&subHal};
+ HalProxy proxy(subHals);
+ std::unique_ptr<EventMessageQueue> eventQueue = makeEventFMQ(0);
+ std::unique_ptr<WakeupMessageQueue> wakeLockQueue = makeWakelockFMQ(0);
+
+ std::vector<SensorInfo> sensorsToConnect;
+ std::vector<int32_t> sensorHandlesToExpect;
+ makeSensorsAndSensorHandlesStartingAndOfSize(1, kNumSensors, sensorsToConnect,
+ sensorHandlesToExpect);
+
+ TestSensorsCallback* callback = new TestSensorsCallback();
+ ::android::sp<ISensorsCallback> callbackPtr = callback;
+ proxy.initialize(*eventQueue->getDesc(), *wakeLockQueue->getDesc(), callbackPtr);
+ subHal.addDynamicSensors(sensorsToConnect);
+
+ std::vector<SensorInfo> sensorsSeen = callback->getSensorsConnected();
+ EXPECT_EQ(kNumSensors, sensorsSeen.size());
+ for (size_t i = 0; i < kNumSensors; i++) {
+ auto sensorHandleSeen = sensorsSeen[i].sensorHandle;
+        // Note: since there is only one subhal, we do not need to adjust the first byte of the expected handle
+ auto sensorHandleExpected = sensorHandlesToExpect[i];
+ EXPECT_EQ(sensorHandleSeen, sensorHandleExpected);
+ }
+}
+
+TEST(HalProxyTest, DynamicSensorsDisconnectedTest) {
+ constexpr size_t kNumSensors = 3;
+ AddAndRemoveDynamicSensorsSubHal subHal;
+ std::vector<ISensorsSubHal*> subHals{&subHal};
+ HalProxy proxy(subHals);
+ std::unique_ptr<EventMessageQueue> eventQueue = makeEventFMQ(0);
+ std::unique_ptr<WakeupMessageQueue> wakeLockQueue = makeWakelockFMQ(0);
+
+ std::vector<SensorInfo> sensorsToConnect;
+ std::vector<int32_t> sensorHandlesToExpect;
+ makeSensorsAndSensorHandlesStartingAndOfSize(20, kNumSensors, sensorsToConnect,
+ sensorHandlesToExpect);
+
+ std::vector<int32_t> nonDynamicSensorHandles;
+ for (int32_t sensorHandle = 1; sensorHandle < 10; sensorHandle++) {
+ nonDynamicSensorHandles.push_back(sensorHandle);
+ }
+
+ std::set<int32_t> nonDynamicSensorHandlesSet(nonDynamicSensorHandles.begin(),
+ nonDynamicSensorHandles.end());
+
+ std::vector<int32_t> sensorHandlesToAttemptToRemove;
+ sensorHandlesToAttemptToRemove.insert(sensorHandlesToAttemptToRemove.end(),
+ sensorHandlesToExpect.begin(),
+ sensorHandlesToExpect.end());
+ sensorHandlesToAttemptToRemove.insert(sensorHandlesToAttemptToRemove.end(),
+ nonDynamicSensorHandles.begin(),
+ nonDynamicSensorHandles.end());
+
+ TestSensorsCallback* callback = new TestSensorsCallback();
+ ::android::sp<ISensorsCallback> callbackPtr = callback;
+ proxy.initialize(*eventQueue->getDesc(), *wakeLockQueue->getDesc(), callbackPtr);
+ subHal.addDynamicSensors(sensorsToConnect);
+ subHal.removeDynamicSensors(sensorHandlesToAttemptToRemove);
+
+ std::vector<int32_t> sensorHandlesSeen = callback->getSensorHandlesDisconnected();
+ EXPECT_EQ(kNumSensors, sensorHandlesSeen.size());
+ for (size_t i = 0; i < kNumSensors; i++) {
+ auto sensorHandleSeen = sensorHandlesSeen[i];
+        // Note: since there is only one subhal, we do not need to adjust the first byte of the expected handle
+ auto sensorHandleExpected = sensorHandlesToExpect[i];
+ EXPECT_EQ(sensorHandleSeen, sensorHandleExpected);
+ EXPECT_TRUE(nonDynamicSensorHandlesSet.find(sensorHandleSeen) ==
+ nonDynamicSensorHandlesSet.end());
+ }
+}
+
// Helper implementations follow
void testSensorsListFromProxyAndSubHal(const std::vector<SensorInfo>& proxySensorsList,
const std::vector<SensorInfo>& subHalSensorsList) {
@@ -187,4 +562,79 @@
}
}
+void ackWakeupEventsToHalProxy(size_t numEvents, std::unique_ptr<WakeupMessageQueue>& wakelockQueue,
+ EventFlag* wakelockQueueFlag) {
+ uint32_t numEventsUInt32 = static_cast<uint32_t>(numEvents);
+ wakelockQueue->write(&numEventsUInt32);
+ wakelockQueueFlag->wake(static_cast<uint32_t>(WakeLockQueueFlagBits::DATA_WRITTEN));
+}
+
+bool readEventsOutOfQueue(size_t numEvents, std::unique_ptr<EventMessageQueue>& eventQueue,
+ EventFlag* eventQueueFlag) {
+ constexpr int64_t kReadBlockingTimeout = INT64_C(500000000);
+ std::vector<Event> events(numEvents);
+ return eventQueue->readBlocking(events.data(), numEvents,
+ static_cast<uint32_t>(EventQueueFlagBits::EVENTS_READ),
+ static_cast<uint32_t>(EventQueueFlagBits::READ_AND_PROCESS),
+ kReadBlockingTimeout, eventQueueFlag);
+}
+
+std::unique_ptr<EventMessageQueue> makeEventFMQ(size_t size) {
+ return std::make_unique<EventMessageQueue>(size, true);
+}
+
+std::unique_ptr<WakeupMessageQueue> makeWakelockFMQ(size_t size) {
+ return std::make_unique<WakeupMessageQueue>(size, true);
+}
+
+Event makeProximityEvent() {
+ Event event;
+ event.timestamp = 0xFF00FF00;
+    // This is the sensor handle of the proximity sensor, which is a wakeup sensor
+ event.sensorHandle = 0x00000008;
+ event.sensorType = SensorType::PROXIMITY;
+ event.u = EventPayload();
+ return event;
+}
+
+Event makeAccelerometerEvent() {
+ Event event;
+ event.timestamp = 0xFF00FF00;
+    // This is the sensor handle of the accelerometer, which is a non-wakeup sensor
+ event.sensorHandle = 0x00000001;
+ event.sensorType = SensorType::ACCELEROMETER;
+ event.u = EventPayload();
+ return event;
+}
+
+std::vector<Event> makeMultipleProximityEvents(size_t numEvents) {
+ std::vector<Event> events;
+ for (size_t i = 0; i < numEvents; i++) {
+ events.push_back(makeProximityEvent());
+ }
+ return events;
+}
+
+std::vector<Event> makeMultipleAccelerometerEvents(size_t numEvents) {
+ std::vector<Event> events;
+ for (size_t i = 0; i < numEvents; i++) {
+ events.push_back(makeAccelerometerEvent());
+ }
+ return events;
+}
+
+void makeSensorsAndSensorHandlesStartingAndOfSize(int32_t start, size_t size,
+ std::vector<SensorInfo>& sensors,
+ std::vector<int32_t>& sensorHandles) {
+ for (int32_t sensorHandle = start; sensorHandle < start + static_cast<int32_t>(size);
+ sensorHandle++) {
+ SensorInfo sensor;
+ // Just set the sensorHandle field to the correct value so as to not have
+ // to compare every field
+ sensor.sensorHandle = sensorHandle;
+ sensors.push_back(sensor);
+ sensorHandles.push_back(sensorHandle);
+ }
+}
+
} // namespace
diff --git a/sensors/2.0/multihal/tests/fake_subhal/SensorsSubHal.cpp b/sensors/2.0/multihal/tests/fake_subhal/SensorsSubHal.cpp
index d581c49..cf3ae75 100644
--- a/sensors/2.0/multihal/tests/fake_subhal/SensorsSubHal.cpp
+++ b/sensors/2.0/multihal/tests/fake_subhal/SensorsSubHal.cpp
@@ -221,6 +221,16 @@
return Void();
}
+void AddAndRemoveDynamicSensorsSubHal::addDynamicSensors(
+ const std::vector<SensorInfo>& sensorsAdded) {
+ mCallback->onDynamicSensorsConnected(sensorsAdded);
+}
+
+void AddAndRemoveDynamicSensorsSubHal::removeDynamicSensors(
+ const std::vector<int32_t>& sensorHandlesRemoved) {
+ mCallback->onDynamicSensorsDisconnected(sensorHandlesRemoved);
+}
+
} // namespace implementation
} // namespace subhal
} // namespace V2_0
diff --git a/sensors/2.0/multihal/tests/fake_subhal/SensorsSubHal.h b/sensors/2.0/multihal/tests/fake_subhal/SensorsSubHal.h
index 61caa2c..c1e3647 100644
--- a/sensors/2.0/multihal/tests/fake_subhal/SensorsSubHal.h
+++ b/sensors/2.0/multihal/tests/fake_subhal/SensorsSubHal.h
@@ -98,13 +98,6 @@
*/
std::map<int32_t, std::shared_ptr<Sensor>> mSensors;
- private:
- /**
- * The current operation mode of the multihal framework. Ensures that all subhals are set to
- * the same operation mode.
- */
- OperationMode mCurrentOperationMode = OperationMode::NORMAL;
-
/**
* Callback used to communicate to the HalProxy when dynamic sensors are connected /
* disconnected, sensor events need to be sent to the framework, and when a wakelock should be
@@ -112,6 +105,13 @@
*/
sp<IHalProxyCallback> mCallback;
+ private:
+ /**
+ * The current operation mode of the multihal framework. Ensures that all subhals are set to
+ * the same operation mode.
+ */
+ OperationMode mCurrentOperationMode = OperationMode::NORMAL;
+
/**
* The next available sensor handle
*/
@@ -151,6 +151,12 @@
Return<void> getSensorsList(getSensorsList_cb _hidl_cb) override;
};
+class AddAndRemoveDynamicSensorsSubHal : public AllSensorsSubHal {
+ public:
+ void addDynamicSensors(const std::vector<SensorInfo>& sensorsAdded);
+    void removeDynamicSensors(const std::vector<int32_t>& sensorHandlesRemoved);
+};
+
} // namespace implementation
} // namespace subhal
} // namespace V2_0
diff --git a/tests/baz/1.0/IBaz.hal b/tests/baz/1.0/IBaz.hal
index 91ed1f2..7855446 100644
--- a/tests/baz/1.0/IBaz.hal
+++ b/tests/baz/1.0/IBaz.hal
@@ -29,6 +29,11 @@
VALL = V0 | V1 | V2 | V3,
};
+ struct BitFieldTester {
+ bitfield<BitField> scalar;
+ vec<bitfield<BitField>> vector;
+ };
+
enum SomeOtherEnum : uint8_t {
bar = 66
};
@@ -108,6 +113,7 @@
haveSomeStrings(string[3] array) generates (string[2] result);
haveAStringVec(vec<string> vector) generates (vec<string> result);
+ repeatBitfieldVec(vec<bitfield<BitField>> vector) generates (vec<bitfield<BitField>> result);
returnABunchOfStrings() generates (string a, string b, string c);
diff --git a/tests/baz/1.0/default/Baz.cpp b/tests/baz/1.0/default/Baz.cpp
index e118122..2ce096c 100644
--- a/tests/baz/1.0/default/Baz.cpp
+++ b/tests/baz/1.0/default/Baz.cpp
@@ -364,6 +364,12 @@
return Void();
}
+Return<void> Baz::repeatBitfieldVec(const hidl_vec<uint8_t>& vector,
+ repeatBitfieldVec_cb _hidl_cb) {
+ _hidl_cb(vector);
+ return Void();
+}
+
Return<void> Baz::returnABunchOfStrings(returnABunchOfStrings_cb _hidl_cb) {
hidl_string eins; eins = "Eins";
hidl_string zwei; zwei = "Zwei";
diff --git a/tests/baz/1.0/default/Baz.h b/tests/baz/1.0/default/Baz.h
index c264f47..1e24d52 100644
--- a/tests/baz/1.0/default/Baz.h
+++ b/tests/baz/1.0/default/Baz.h
@@ -86,6 +86,8 @@
haveSomeStrings_cb _hidl_cb) override;
Return<void> haveAStringVec(const hidl_vec<hidl_string>& vector,
haveAStringVec_cb _hidl_cb) override;
+ Return<void> repeatBitfieldVec(const hidl_vec<uint8_t>& vector,
+ repeatBitfieldVec_cb _hidl_cb) override;
Return<void> returnABunchOfStrings(returnABunchOfStrings_cb _hidl_cb) override;
Return<uint8_t> returnABitField() override;
Return<uint32_t> size(uint32_t size) override;
diff --git a/tv/tuner/1.0/Android.bp b/tv/tuner/1.0/Android.bp
index 986518b..09265f7 100644
--- a/tv/tuner/1.0/Android.bp
+++ b/tv/tuner/1.0/Android.bp
@@ -13,6 +13,7 @@
"IDescrambler.hal",
"IFrontend.hal",
"IFrontendCallback.hal",
+ "ILnb.hal",
"ITuner.hal",
],
interfaces: [
diff --git a/tv/tuner/1.0/IDemux.hal b/tv/tuner/1.0/IDemux.hal
index 2d7b275..7fd7e26 100644
--- a/tv/tuner/1.0/IDemux.hal
+++ b/tv/tuner/1.0/IDemux.hal
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package android.hardware.tv.tuner@1.0;
import IDemuxCallback;
@@ -46,9 +62,9 @@
*
* It is used by the client to get the descriptor of the filter's Fast
* Message Queue. The data in FMQ is filtered out from MPEG transport
- * stream. The data is origanized to data blocks which may have
+ * stream. The data is organized to data blocks which may have
* different length. The length's information of one or multiple data blocks
- * is sent to client throught DemuxFilterEvent.
+ * is sent to client through DemuxFilterEvent.
*
* @param filterId the ID of the filter.
* @return result Result status of the operation.
@@ -81,7 +97,7 @@
/**
* Start the filter.
*
- * It is used by the client to ask the filter to start filterring data.
+ * It is used by the client to ask the filter to start filtering data.
*
* @param filterId the ID of the filter.
* @return result Result status of the operation.
@@ -180,5 +196,220 @@
* UNKNOWN_ERROR if failed for other reasons.
*/
close() generates (Result result);
-};
+ /**
+ * Add output to the demux
+ *
+ * It is used by the client to record output data from selected filters.
+ *
+ * @param bufferSize the buffer size of the output to be added. It's used to
+     *     create an FMQ (Fast Message Queue) to hold data from selected filters.
+ * @param cb the callback for the demux to be used to send notifications
+ * back to the client.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * OUT_OF_MEMORY if failed for not enough memory.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ addOutput(uint32_t bufferSize, IDemuxCallback cb) generates (Result result);
+
+ /**
+ * Get the descriptor of the output's FMQ
+ *
+ * It is used by the client to get the descriptor of the output's Fast
+ * Message Queue. The data in FMQ is muxed packets output from selected
+ * filters. The packet's format is specified by DemuxDataFormat in
+ * DemuxOutputSettings.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * UNKNOWN_ERROR if failed for other reasons.
+ * @return queue the descriptor of the output's FMQ
+ */
+ getOutputQueueDesc() generates (Result result, fmq_sync<uint8_t> queue);
+
+ /**
+ * Configure the demux's output.
+ *
+ * It is used by the client to configure the demux's output for recording.
+ *
+ * @param settings the settings of the demux's output.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ configureOutput(DemuxOutputSettings settings) generates (Result result);
+
+ /**
+ * Attach one filter to the demux's output.
+ *
+ * It is used by the client to mux one filter's output to demux's output.
+ *
+ * @param filterId the ID of the attached filter.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ attachOutputFilter(DemuxFilterId filterId) generates (Result result);
+
+ /**
+ * Detach one filter from the demux's output.
+ *
+ * It is used by the client to remove one filter's output from demux's
+ * output.
+ *
+ * @param filterId the ID of the detached filter.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ detachOutputFilter(DemuxFilterId filterId) generates (Result result);
+
+ /**
+     * Start taking data into the demux's output.
+     *
+     * It is used by the client to ask the output to start taking data from
+     * the attached filters.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ startOutput() generates (Result result);
+
+ /**
+     * Stop taking data into the demux's output.
+     *
+     * It is used by the client to ask the output to stop taking data from
+     * the attached filters.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ stopOutput() generates (Result result);
+
+ /**
+ * Flush unconsumed data in the demux's output.
+ *
+ * It is used by the client to ask the demux to flush the data which is
+ * already produced but not consumed yet in the demux's output.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ flushOutput() generates (Result result);
+
+ /**
+ * Remove the demux's output.
+ *
+ * It is used by the client to remove the demux's output.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ removeOutput() generates (Result result);
+
+ /**
+ * Add input to the demux
+ *
+ * It is used by the client to add the demux's input for playback content.
+ *
+ * @param bufferSize the buffer size of the demux's input to be added.
+     *     It's used to create an FMQ (Fast Message Queue) to hold input data.
+ * @param cb the callback for the demux to be used to send notifications
+ * back to the client.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * OUT_OF_MEMORY if failed for not enough memory.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ addInput(uint32_t bufferSize, IDemuxCallback cb) generates (Result result);
+
+ /**
+ * Get the descriptor of the input's FMQ
+ *
+ * It is used by the client to get the descriptor of the input's Fast
+     * Message Queue. The data in the FMQ is fed by the client. The data format is specified
+ * by DemuxDataFormat in DemuxInputSettings.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * UNKNOWN_ERROR if failed for other reasons.
+     * @return queue the descriptor of the input's FMQ
+ */
+ getInputQueueDesc() generates (Result result, fmq_sync<uint8_t> queue);
+
+ /**
+ * Configure the demux's input.
+ *
+ * It is used by the client to configure the demux's input for playback.
+ *
+ * @param settings the settings of the demux's input.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ configureInput(DemuxInputSettings settings) generates (Result result);
+
+ /**
+     * Start consuming data from the demux's input.
+     *
+     * It is used by the client to ask the demux to start consuming data from
+     * the demux's input.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ startInput() generates (Result result);
+
+ /**
+     * Stop consuming data from the demux's input.
+     *
+     * It is used by the client to ask the demux to stop consuming data from
+     * the demux's input.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ stopInput() generates (Result result);
+
+ /**
+ * Flush unconsumed data in the demux's input.
+ *
+ * It is used by the client to ask the demux to flush the data which is
+ * already produced but not consumed yet in the demux's input.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ flushInput() generates (Result result);
+
+ /**
+ * Remove the demux's input.
+ *
+ * It is used by the client to remove the demux's input.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if failed for wrong state.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ removeInput() generates (Result result);
+};
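To make the new playback-input API concrete, here is a minimal client-side sketch of the expected call order (addInput, getInputQueueDesc, configureInput, startInput). It assumes the standard HIDL-generated C++ proxy for IDemux, whose signatures match the default Demux implementation later in this patch; the buffer size, the settings values, and the helper name setUpDemuxPlaybackInput are placeholders, not part of the HAL.

// Hedged sketch: set up the demux input for playback.
// "demux" is an already-opened sp<IDemux>; "cb" implements IDemuxCallback.
Result setUpDemuxPlaybackInput(const sp<IDemux>& demux, const sp<IDemuxCallback>& cb) {
    // Ask the HAL to create the input FMQ; the size here is an arbitrary placeholder.
    Result res = demux->addInput(188 * 1024, cb);
    if (res != Result::SUCCESS) return res;

    // Fetch the FMQ descriptor so the client can wrap it in its own
    // MessageQueue<uint8_t, kSynchronizedReadWrite> and feed TS packets into it.
    demux->getInputQueueDesc([&](Result r, const auto& /* queueDesc */) { res = r; });
    if (res != Result::SUCCESS) return res;

    DemuxInputSettings settings = {};  // fill in packetSize, thresholds, data format
    res = demux->configureInput(settings);
    if (res != Result::SUCCESS) return res;

    // The HAL's input thread then consumes data as DATA_READY is signaled on the FMQ.
    return demux->startInput();
}

Once startInput() succeeds, the client writes TS packets into the input FMQ and signals DATA_READY; status changes come back through IDemuxCallback::onInputStatus. The record-output path mirrors this flow (addOutput, getOutputQueueDesc, configureOutput, attachOutputFilter, startOutput).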
diff --git a/tv/tuner/1.0/IDemuxCallback.hal b/tv/tuner/1.0/IDemuxCallback.hal
index 7efd2c3..7bce9ef 100644
--- a/tv/tuner/1.0/IDemuxCallback.hal
+++ b/tv/tuner/1.0/IDemuxCallback.hal
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package android.hardware.tv.tuner@1.0;
interface IDemuxCallback {
@@ -15,5 +31,19 @@
* @param status a new status of the demux filter.
*/
oneway onFilterStatus(DemuxFilterId filterId, DemuxFilterStatus status);
+
+ /**
+ * Notify the client a new status of the demux's output.
+ *
+ * @param status a new status of the demux's output.
+ */
+ oneway onOutputStatus(DemuxOutputStatus status);
+
+ /**
+ * Notify the client a new status of the demux's input.
+ *
+ * @param status a new status of the demux's input.
+ */
+ oneway onInputStatus(DemuxInputStatus status);
};
diff --git a/tv/tuner/1.0/IDescrambler.hal b/tv/tuner/1.0/IDescrambler.hal
index d078657..61ff1df 100644
--- a/tv/tuner/1.0/IDescrambler.hal
+++ b/tv/tuner/1.0/IDescrambler.hal
@@ -1,3 +1,19 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package android.hardware.tv.tuner@1.0;
/**
* Descrambler is used to descramble input data.
diff --git a/tv/tuner/1.0/IFrontend.hal b/tv/tuner/1.0/IFrontend.hal
index f7237ba..83e390d 100644
--- a/tv/tuner/1.0/IFrontend.hal
+++ b/tv/tuner/1.0/IFrontend.hal
@@ -16,6 +16,7 @@
package android.hardware.tv.tuner@1.0;
import IFrontendCallback;
+import ILnb;
/**
* A Tuner Frontend is used to tune to a frequency and lock signal.
@@ -81,4 +82,80 @@
* UNKNOWN_ERROR if failed for other reasons.
*/
close() generates (Result result);
+
+ /**
+ * Scan the frontend to use the settings given.
+ *
+ * This uses the frontend to start a scan from signal delivery information.
+     * If a previous scan isn't completed, this call MUST stop the previous
+     * scan and start a new scan.
+ * Scan is an async call, with FrontendScanMessage sent via callback.
+ *
+ * @param settings Signal delivery information which the frontend uses to
+ * scan the signal.
+ * @param type the type which the frontend uses to scan the signal.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if tuning can't be applied at current stage,
+ * UNKNOWN_ERROR if tuning failed for other reasons.
+ */
+ scan(FrontendSettings settings, FrontendScanType type) generates (Result result);
+
+ /**
+     * Stops a previously started scan.
+     *
+     * If the method completes successfully, the frontend stops the previous
+     * scan.
+ *
+ * @return result Result status of the operation.
+     *         SUCCESS if the previous scan is stopped successfully.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ stopScan() generates (Result result);
+
+ /**
+     * Gets the statuses of the frontend.
+     *
+     * This retrieves the statuses of the frontend for the given status types.
+     *
+     * @param statusTypes an array of status types which the caller requests.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if tuning can't be applied at current stage,
+ * UNKNOWN_ERROR if tuning failed for other reasons.
+     * @return statuses an array of statuses in response to the caller's
+     *         request.
+ */
+ getStatus(vec<FrontendStatusType> statusTypes) generates (Result result, vec<FrontendStatus> statuses);
+
+ /**
+ * Sets Low-Noise Block downconverter (LNB) for satellite frontend.
+ *
+ * This assigns a hardware LNB resource to the satellite frontend. It can be
+ * called multiple times to update LNB assignment. The LNB resource must be
+ * released when the frontend is closed.
+ *
+     * @param lnbId the ID of the assigned LNB resource.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+     *         INVALID_STATE if the frontend can't be set with an LNB, such as
+     *         a cable frontend.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ setLnb(LnbId lnbId) generates (Result result);
+
+ /**
+     * Enable or disable the Low Noise Amplifier (LNA).
+     *
+     * @param bEnable true to activate the LNA module; false to deactivate the LNA
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_STATE if the frontend doesn't support LNA.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ setLna(bool bEnable) generates (Result result);
};
diff --git a/tv/tuner/1.0/IFrontendCallback.hal b/tv/tuner/1.0/IFrontendCallback.hal
index e907049..8896a09 100644
--- a/tv/tuner/1.0/IFrontendCallback.hal
+++ b/tv/tuner/1.0/IFrontendCallback.hal
@@ -33,5 +33,13 @@
* Specification Version 4.2.
*/
oneway onDiseqcMessage(vec<uint8_t> diseqcMessage);
-};
+ /**
+ * The callback function that must be called by HAL implementation to notify
+ * the client of scan messages.
+ *
+ * @param type the type of scan message.
+ * @param message the scan message sent by HAL to the client.
+ */
+ oneway onScanMessage(FrontendScanMessageType type, FrontendScanMessage message);
+};
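As a rough illustration of the scan flow introduced above, here is a hedged client-side sketch assuming the usual HIDL-generated C++ bindings: scan() kicks off the asynchronous scan, results arrive through IFrontendCallback::onScanMessage, and getStatus()/stopScan() are used afterwards. The enum values named below (SCAN_AUTO, DEMOD_LOCK, SNR) are assumptions about types.hal, which this change does not show, and runScan is a placeholder helper.

// Hedged sketch: drive a frontend scan. "frontend" is an sp<IFrontend> whose
// registered IFrontendCallback receives onScanMessage(type, message) asynchronously.
Result runScan(const sp<IFrontend>& frontend, const FrontendSettings& settings) {
    Result res = frontend->scan(settings, FrontendScanType::SCAN_AUTO);  // assumed enum value
    if (res != Result::SUCCESS) return res;

    // ... wait for FrontendScanMessage callbacks via onScanMessage ...

    // Query a couple of statuses once the scan reports progress; the status
    // types here are assumed examples.
    frontend->getStatus({FrontendStatusType::DEMOD_LOCK, FrontendStatusType::SNR},
                        [&](Result r, const hidl_vec<FrontendStatus>& /* statuses */) { res = r; });
    if (res != Result::SUCCESS) return res;

    return frontend->stopScan();
}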
diff --git a/tv/tuner/1.0/ILnb.hal b/tv/tuner/1.0/ILnb.hal
new file mode 100644
index 0000000..6b7119e
--- /dev/null
+++ b/tv/tuner/1.0/ILnb.hal
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package android.hardware.tv.tuner@1.0;
+
+/**
+ * A Tuner LNB (low-noise block downconverter) is used by satellite frontend
+ * to receive the microwave signal from the satellite, amplify it, and
+ * downconvert the frequency to a lower frequency.
+ */
+interface ILnb {
+ /**
+ * Set the lnb's power voltage.
+ *
+     * @param voltage the power voltage for the LNB to use.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_ARGUMENT if the selected voltage isn't allowed,
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ setVoltage(FrontendLnbVoltage voltage) generates (Result result);
+
+ /**
+ * Set the lnb's tone mode.
+ *
+     * @param tone the tone mode for the LNB to use.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_ARGUMENT if the selected tone mode isn't allowed,
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ setTone(FrontendLnbTone tone) generates (Result result);
+
+ /**
+ * Select the lnb's position.
+ *
+     * @param position the satellite position for the LNB to use.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * INVALID_ARGUMENT if the selected position isn't allowed,
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ setSatellitePosition(FrontendLnbPosition position) generates (Result result);
+
+ /**
+ * Sends DiSEqC (Digital Satellite Equipment Control) message.
+ *
+     * The client sends a DiSEqC message to the LNB. The response message from
+     * the device comes back to the client through the frontend's callback
+     * onDiseqcMessage.
+ *
+ * @param diseqcMessage a byte array of data for DiSEqC message which is
+ * specified by EUTELSAT Bus Functional Specification Version 4.2.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+     *         INVALID_STATE if the frontend can't send a DiSEqC message, such as
+     *         a cable frontend.
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ sendDiseqcMessage(vec<uint8_t> diseqcMessage) generates (Result result);
+
+ /**
+     * Releases the LNB instance.
+     *
+     * Associated resources are released. close may be called more than once.
+     * Calls to any other method after this will return an error.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * UNKNOWN_ERROR if failed for other reasons.
+ */
+ close() generates (Result result);
+};
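A short usage sketch for the new ILnb interface follows, again assuming the generated C++ proxy. The voltage, tone, and position enum values are placeholders and are not defined in this hunk; setUpLnb is a hypothetical helper.

// Hedged sketch: typical LNB setup before tuning a satellite frontend.
// "lnb" would come from ITuner::openLnbById(); enum values below are assumed.
Result setUpLnb(const sp<ILnb>& lnb, const hidl_vec<uint8_t>& diseqcMsg) {
    Result res = lnb->setVoltage(FrontendLnbVoltage::VOLTAGE_13V);  // assumed value
    if (res != Result::SUCCESS) return res;

    res = lnb->setTone(FrontendLnbTone::NONE);  // assumed value
    if (res != Result::SUCCESS) return res;

    res = lnb->setSatellitePosition(FrontendLnbPosition::POSITION_A);  // assumed value
    if (res != Result::SUCCESS) return res;

    // Optional: send a DiSEqC command; the reply arrives via the frontend's
    // onDiseqcMessage callback.
    return lnb->sendDiseqcMessage(diseqcMsg);
}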
diff --git a/tv/tuner/1.0/ITuner.hal b/tv/tuner/1.0/ITuner.hal
index a0f3e8e..1cf0e38 100644
--- a/tv/tuner/1.0/ITuner.hal
+++ b/tv/tuner/1.0/ITuner.hal
@@ -19,10 +19,11 @@
import IDemux;
import IDescrambler;
import IFrontend;
+import ILnb;
/**
* Top level interface to manage Frontend, Demux and Decrambler hardware
- * resouces which are needed for Android TV.
+ * resources which are needed for Android TV.
*/
interface ITuner {
/**
@@ -45,6 +46,7 @@
* @param frontendId the id of the frontend to be opened.
* @return result Result status of the operation.
* SUCCESS if successful,
+ * UNAVAILABLE if no resource.
* UNKNOWN_ERROR if creation failed for other reasons.
* @return frontend the newly created frontend interface.
*/
@@ -62,10 +64,20 @@
* @return demuxId newly created demux id.
* @return demux the newly created demux interface.
*/
- openDemux()
+ openDemux()
generates (Result result, DemuxId demuxId, IDemux demux);
/**
+ * Retrieve the Demux's Capabilities.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * UNKNOWN_ERROR if the inquiry failed for other reasons.
+ * @return caps the Demux's Capabilities.
+ */
+ getDemuxCaps() generates (Result result, DemuxCapabilities caps);
+
+ /**
* Create a new instance of Descrambler.
*
* It is used by the client to create a Descrambler instance.
@@ -75,6 +87,46 @@
* UNKNOWN_ERROR if creation failed for other reasons.
* @return descrambler the newly created descrambler interface.
*/
- openDescrambler()
+ openDescrambler()
generates (Result result, IDescrambler descrambler);
+
+ /**
+ * Retrieve the frontend's information.
+ *
+     * @param frontendId the ID of the frontend to be queried.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * UNKNOWN_ERROR if the inquiry failed for other reasons.
+ * @return info the frontend's information.
+ */
+ getFrontendInfo(FrontendId frontendId)
+ generates (Result result, FrontendInfo info);
+
+ /**
+ * Get low-noise block downconverter (LNB) IDs.
+ *
+ * It is used by the client to get all available LNBs' IDs.
+ *
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+     *         UNKNOWN_ERROR if the operation failed for other reasons.
+     * @return lnbIds an array of LnbId for the available LNBs.
+ */
+ getLnbIds() generates (Result result, vec<LnbId> lnbIds);
+
+ /**
+     * Create a new instance of Lnb given an lnbId.
+     *
+     * It is used by the client to create an Lnb instance for a satellite frontend.
+ *
+ * @param lnbId the id of the LNB to be opened.
+ * @return result Result status of the operation.
+ * SUCCESS if successful,
+ * UNAVAILABLE if no resource.
+ * UNKNOWN_ERROR if creation failed for other reasons.
+ * @return lnb the newly created Lnb interface.
+ */
+ openLnbById(LnbId lnbId)
+ generates (Result result, ILnb lnb);
};
+
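Tying the LNB pieces together, here is a hedged sketch of the new ITuner entry points: getLnbIds() enumerates the available LNBs and openLnbById() opens one. It assumes the generated C++ proxy and treats LnbId as the integer-like typedef the signatures imply; openFirstLnb is a placeholder helper.

// Hedged sketch: enumerate LNBs and open the first one.
sp<ILnb> openFirstLnb(const sp<ITuner>& tuner) {
    hidl_vec<LnbId> ids;
    Result res = Result::UNKNOWN_ERROR;
    tuner->getLnbIds([&](Result r, const hidl_vec<LnbId>& lnbIds) {
        res = r;
        ids = lnbIds;
    });
    if (res != Result::SUCCESS || ids.size() == 0) {
        return nullptr;
    }

    sp<ILnb> lnb;
    tuner->openLnbById(ids[0], [&](Result r, const sp<ILnb>& openedLnb) {
        if (r == Result::SUCCESS) {
            lnb = openedLnb;
        }
    });
    return lnb;  // may be nullptr if opening failed
}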
diff --git a/tv/tuner/1.0/default/Android.bp b/tv/tuner/1.0/default/Android.bp
index b211dd2..0ae8bcd 100644
--- a/tv/tuner/1.0/default/Android.bp
+++ b/tv/tuner/1.0/default/Android.bp
@@ -8,6 +8,7 @@
"Descrambler.cpp",
"Demux.cpp",
"Tuner.cpp",
+ "Lnb.cpp",
"service.cpp",
],
diff --git a/tv/tuner/1.0/default/Demux.cpp b/tv/tuner/1.0/default/Demux.cpp
index 4016c5a..8bb79f9 100644
--- a/tv/tuner/1.0/default/Demux.cpp
+++ b/tv/tuner/1.0/default/Demux.cpp
@@ -67,69 +67,75 @@
0x73, 0x63, 0x65, 0x6e, 0x65,
};
-Demux::Demux(uint32_t demuxId) {
+Demux::Demux(uint32_t demuxId, sp<Tuner> tuner) {
mDemuxId = demuxId;
+ mTunerService = tuner;
}
Demux::~Demux() {}
-bool Demux::createAndSaveMQ(uint32_t bufferSize, uint32_t filterId) {
- ALOGV("%s", __FUNCTION__);
-
- // Create a synchronized FMQ that supports blocking read/write
- std::unique_ptr<FilterMQ> tmpFilterMQ =
- std::unique_ptr<FilterMQ>(new (std::nothrow) FilterMQ(bufferSize, true));
- if (!tmpFilterMQ->isValid()) {
- ALOGW("Failed to create FMQ of filter with id: %d", filterId);
- return false;
- }
-
- mFilterMQs.resize(filterId + 1);
- mFilterMQs[filterId] = std::move(tmpFilterMQ);
-
- EventFlag* mFilterEventFlag;
- if (EventFlag::createEventFlag(mFilterMQs[filterId]->getEventFlagWord(), &mFilterEventFlag) !=
- OK) {
- return false;
- }
- mFilterEventFlags.resize(filterId + 1);
- mFilterEventFlags[filterId] = mFilterEventFlag;
- mFilterWriteCount.resize(filterId + 1);
- mFilterWriteCount[filterId] = 0;
- mThreadRunning.resize(filterId + 1);
-
- return true;
-}
-
Return<Result> Demux::setFrontendDataSource(uint32_t frontendId) {
ALOGV("%s", __FUNCTION__);
- mSourceFrontendId = frontendId;
+ if (mTunerService == nullptr) {
+ return Result::NOT_INITIALIZED;
+ }
- return Result::SUCCESS;
+ mFrontend = mTunerService->getFrontendById(frontendId);
+
+ if (mFrontend == nullptr) {
+ return Result::INVALID_STATE;
+ }
+
+ mFrontendSourceFile = mFrontend->getSourceFile();
+
+ mTunerService->setFrontendAsDemuxSource(frontendId, mDemuxId);
+ return startBroadcastInputLoop();
}
Return<void> Demux::addFilter(DemuxFilterType type, uint32_t bufferSize,
const sp<IDemuxCallback>& cb, addFilter_cb _hidl_cb) {
ALOGV("%s", __FUNCTION__);
- uint32_t filterId = mLastUsedFilterId + 1;
- mLastUsedFilterId += 1;
+ uint32_t filterId;
+
+ if (!mUnusedFilterIds.empty()) {
+ filterId = *mUnusedFilterIds.begin();
+
+ mUnusedFilterIds.erase(filterId);
+ } else {
+ filterId = ++mLastUsedFilterId;
+
+ mFilterCallbacks.resize(filterId + 1);
+ mFilterMQs.resize(filterId + 1);
+ mFilterEvents.resize(filterId + 1);
+ mFilterEventFlags.resize(filterId + 1);
+ mFilterThreadRunning.resize(filterId + 1);
+ mFilterThreads.resize(filterId + 1);
+ mFilterPids.resize(filterId + 1);
+ mFilterOutputs.resize(filterId + 1);
+ mFilterStatus.resize(filterId + 1);
+ }
+
+ mUsedFilterIds.insert(filterId);
if ((type != DemuxFilterType::PCR || type != DemuxFilterType::TS) && cb == nullptr) {
ALOGW("callback can't be null");
_hidl_cb(Result::INVALID_ARGUMENT, filterId);
return Void();
}
+
// Add callback
- mDemuxCallbacks.resize(filterId + 1);
- mDemuxCallbacks[filterId] = cb;
+ mFilterCallbacks[filterId] = cb;
- // Mapping from the filter ID to the filter type
- mFilterTypes.resize(filterId + 1);
- mFilterTypes[filterId] = type;
+ // Mapping from the filter ID to the filter event
+ DemuxFilterEvent event{
+ .filterId = filterId,
+ .filterType = type,
+ };
+ mFilterEvents[filterId] = event;
- if (!createAndSaveMQ(bufferSize, filterId)) {
+ if (!createFilterMQ(bufferSize, filterId)) {
_hidl_cb(Result::UNKNOWN_ERROR, -1);
return Void();
}
@@ -141,8 +147,8 @@
Return<void> Demux::getFilterQueueDesc(uint32_t filterId, getFilterQueueDesc_cb _hidl_cb) {
ALOGV("%s", __FUNCTION__);
- if (filterId < 0 || filterId > mLastUsedFilterId) {
- ALOGW("No filter with id: %d exists", filterId);
+ if (mUsedFilterIds.find(filterId) == mUsedFilterIds.end()) {
+ ALOGW("No filter with id: %d exists to get desc", filterId);
_hidl_cb(Result::INVALID_ARGUMENT, FilterMQ::Descriptor());
return Void();
}
@@ -151,70 +157,81 @@
return Void();
}
-Return<Result> Demux::configureFilter(uint32_t /* filterId */,
- const DemuxFilterSettings& /* settings */) {
+Return<Result> Demux::configureFilter(uint32_t filterId, const DemuxFilterSettings& settings) {
ALOGV("%s", __FUNCTION__);
+ switch (mFilterEvents[filterId].filterType) {
+ case DemuxFilterType::SECTION:
+ mFilterPids[filterId] = settings.section().tpid;
+ break;
+ case DemuxFilterType::PES:
+ mFilterPids[filterId] = settings.pesData().tpid;
+ break;
+ case DemuxFilterType::TS:
+ mFilterPids[filterId] = settings.ts().tpid;
+ break;
+ case DemuxFilterType::AUDIO:
+ mFilterPids[filterId] = settings.audio().tpid;
+ break;
+ case DemuxFilterType::VIDEO:
+ mFilterPids[filterId] = settings.video().tpid;
+ break;
+ case DemuxFilterType::RECORD:
+ mFilterPids[filterId] = settings.record().tpid;
+ break;
+ case DemuxFilterType::PCR:
+ mFilterPids[filterId] = settings.pcr().tpid;
+ break;
+ default:
+ return Result::UNKNOWN_ERROR;
+ }
return Result::SUCCESS;
}
Return<Result> Demux::startFilter(uint32_t filterId) {
ALOGV("%s", __FUNCTION__);
+ Result result;
- if (filterId < 0 || filterId > mLastUsedFilterId) {
- ALOGW("No filter with id: %d exists", filterId);
+ if (mUsedFilterIds.find(filterId) == mUsedFilterIds.end()) {
+ ALOGW("No filter with id: %d exists to start filter", filterId);
return Result::INVALID_ARGUMENT;
}
- DemuxFilterType filterType = mFilterTypes[filterId];
- Result result;
- DemuxFilterEvent event{
- .filterId = filterId,
- .filterType = filterType,
- };
-
- switch (filterType) {
- case DemuxFilterType::SECTION:
- result = startSectionFilterHandler(event);
- break;
- case DemuxFilterType::PES:
- result = startPesFilterHandler(event);
- break;
- case DemuxFilterType::TS:
- result = startTsFilterHandler();
- return Result::SUCCESS;
- case DemuxFilterType::AUDIO:
- case DemuxFilterType::VIDEO:
- result = startMediaFilterHandler(event);
- break;
- case DemuxFilterType::RECORD:
- result = startRecordFilterHandler(event);
- break;
- case DemuxFilterType::PCR:
- result = startPcrFilterHandler();
- return Result::SUCCESS;
- default:
- return Result::UNKNOWN_ERROR;
- }
+ result = startFilterLoop(filterId);
return result;
}
-Return<Result> Demux::stopFilter(uint32_t /* filterId */) {
+Return<Result> Demux::stopFilter(uint32_t filterId) {
ALOGV("%s", __FUNCTION__);
+ mFilterThreadRunning[filterId] = false;
+
+ std::lock_guard<std::mutex> lock(mFilterThreadLock);
+
return Result::SUCCESS;
}
-Return<Result> Demux::flushFilter(uint32_t /* filterId */) {
+Return<Result> Demux::flushFilter(uint32_t filterId) {
ALOGV("%s", __FUNCTION__);
+    // Temporary implementation: drain the filter's FMQ by reading and discarding its contents
+    int size = mFilterMQs[filterId]->availableToRead();
+    char* buffer = new char[size];
+    mFilterMQs[filterId]->read((unsigned char*)&buffer[0], size);
+    delete[] buffer;
+    mFilterStatus[filterId] = DemuxFilterStatus::DATA_READY;
+
return Result::SUCCESS;
}
-Return<Result> Demux::removeFilter(uint32_t /* filterId */) {
+Return<Result> Demux::removeFilter(uint32_t filterId) {
ALOGV("%s", __FUNCTION__);
+ // resetFilterRecords(filterId);
+ mUsedFilterIds.erase(filterId);
+ mUnusedFilterIds.insert(filterId);
+
return Result::SUCCESS;
}
@@ -239,25 +256,353 @@
Return<Result> Demux::close() {
ALOGV("%s", __FUNCTION__);
+ set<uint32_t>::iterator it;
+ mInputThread = 0;
+ mOutputThread = 0;
+ mFilterThreads.clear();
+ mUnusedFilterIds.clear();
+ mUsedFilterIds.clear();
+ mFilterCallbacks.clear();
+ mFilterMQs.clear();
+ mFilterEvents.clear();
+ mFilterEventFlags.clear();
+ mFilterOutputs.clear();
+ mFilterPids.clear();
+ mLastUsedFilterId = -1;
+
return Result::SUCCESS;
}
-bool Demux::writeSectionsAndCreateEvent(DemuxFilterEvent& event, uint32_t sectionNum) {
- event.events.resize(sectionNum);
- for (int i = 0; i < sectionNum; i++) {
- DemuxFilterSectionEvent secEvent;
- secEvent = {
- // temp dump meta data
- .tableId = 0,
- .version = 1,
- .sectionNum = 1,
- .dataLength = 530,
- };
- event.events[i].section(secEvent);
- if (!writeDataToFilterMQ(fakeDataInputBuffer, event.filterId)) {
- return false;
- }
+Return<Result> Demux::addOutput(uint32_t bufferSize, const sp<IDemuxCallback>& cb) {
+ ALOGV("%s", __FUNCTION__);
+
+ // Create a synchronized FMQ that supports blocking read/write
+ std::unique_ptr<FilterMQ> tmpFilterMQ =
+ std::unique_ptr<FilterMQ>(new (std::nothrow) FilterMQ(bufferSize, true));
+ if (!tmpFilterMQ->isValid()) {
+ ALOGW("Failed to create output FMQ");
+ return Result::UNKNOWN_ERROR;
}
+
+ mOutputMQ = std::move(tmpFilterMQ);
+
+ if (EventFlag::createEventFlag(mOutputMQ->getEventFlagWord(), &mOutputEventFlag) != OK) {
+ return Result::UNKNOWN_ERROR;
+ }
+
+ mOutputCallback = cb;
+
+ return Result::SUCCESS;
+}
+
+Return<void> Demux::getOutputQueueDesc(getOutputQueueDesc_cb _hidl_cb) {
+ ALOGV("%s", __FUNCTION__);
+
+ if (!mOutputMQ) {
+ _hidl_cb(Result::NOT_INITIALIZED, FilterMQ::Descriptor());
+ return Void();
+ }
+
+ _hidl_cb(Result::SUCCESS, *mOutputMQ->getDesc());
+ return Void();
+}
+
+Return<Result> Demux::configureOutput(const DemuxOutputSettings& settings) {
+ ALOGV("%s", __FUNCTION__);
+
+ mOutputConfigured = true;
+ mOutputSettings = settings;
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::attachOutputFilter(uint32_t /*filterId*/) {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::detachOutputFilter(uint32_t /* filterId */) {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::startOutput() {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::stopOutput() {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::flushOutput() {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::removeOutput() {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::addInput(uint32_t bufferSize, const sp<IDemuxCallback>& cb) {
+ ALOGV("%s", __FUNCTION__);
+
+ // Create a synchronized FMQ that supports blocking read/write
+ std::unique_ptr<FilterMQ> tmpInputMQ =
+ std::unique_ptr<FilterMQ>(new (std::nothrow) FilterMQ(bufferSize, true));
+ if (!tmpInputMQ->isValid()) {
+ ALOGW("Failed to create input FMQ");
+ return Result::UNKNOWN_ERROR;
+ }
+
+ mInputMQ = std::move(tmpInputMQ);
+
+ if (EventFlag::createEventFlag(mInputMQ->getEventFlagWord(), &mInputEventFlag) != OK) {
+ return Result::UNKNOWN_ERROR;
+ }
+
+ mInputCallback = cb;
+
+ return Result::SUCCESS;
+}
+
+Return<void> Demux::getInputQueueDesc(getInputQueueDesc_cb _hidl_cb) {
+ ALOGV("%s", __FUNCTION__);
+
+ if (!mInputMQ) {
+ _hidl_cb(Result::NOT_INITIALIZED, FilterMQ::Descriptor());
+ return Void();
+ }
+
+ _hidl_cb(Result::SUCCESS, *mInputMQ->getDesc());
+ return Void();
+}
+
+Return<Result> Demux::configureInput(const DemuxInputSettings& settings) {
+ ALOGV("%s", __FUNCTION__);
+
+ mInputConfigured = true;
+ mInputSettings = settings;
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::startInput() {
+ ALOGV("%s", __FUNCTION__);
+
+ if (!mInputCallback) {
+ return Result::NOT_INITIALIZED;
+ }
+
+ if (!mInputConfigured) {
+ return Result::INVALID_STATE;
+ }
+
+ pthread_create(&mInputThread, NULL, __threadLoopInput, this);
+ pthread_setname_np(mInputThread, "demux_input_waiting_loop");
+
+ // TODO start another thread to send filter status callback to the framework
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::stopInput() {
+ ALOGV("%s", __FUNCTION__);
+
+ mInputThreadRunning = false;
+
+ std::lock_guard<std::mutex> lock(mInputThreadLock);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::flushInput() {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Demux::removeInput() {
+ ALOGV("%s", __FUNCTION__);
+
+ mInputMQ = nullptr;
+
+ return Result::SUCCESS;
+}
+
+Result Demux::startFilterLoop(uint32_t filterId) {
+ struct ThreadArgs* threadArgs = (struct ThreadArgs*)malloc(sizeof(struct ThreadArgs));
+ threadArgs->user = this;
+ threadArgs->filterId = filterId;
+
+ pthread_t mFilterThread;
+ pthread_create(&mFilterThread, NULL, __threadLoopFilter, (void*)threadArgs);
+ mFilterThreads[filterId] = mFilterThread;
+ pthread_setname_np(mFilterThread, "demux_filter_waiting_loop");
+
+ return Result::SUCCESS;
+}
+
+Result Demux::startSectionFilterHandler(uint32_t filterId) {
+ if (mFilterOutputs[filterId].empty()) {
+ return Result::SUCCESS;
+ }
+ if (!writeSectionsAndCreateEvent(filterId, mFilterOutputs[filterId])) {
+ ALOGD("[Demux] filter %d fails to write into FMQ. Ending thread", filterId);
+ return Result::UNKNOWN_ERROR;
+ }
+
+ mFilterOutputs[filterId].clear();
+
+ return Result::SUCCESS;
+}
+
+Result Demux::startPesFilterHandler(uint32_t filterId) {
+ std::lock_guard<std::mutex> lock(mFilterEventLock);
+ if (mFilterOutputs[filterId].empty()) {
+ return Result::SUCCESS;
+ }
+
+ for (int i = 0; i < mFilterOutputs[filterId].size(); i += 188) {
+ if (mPesSizeLeft == 0) {
+ uint32_t prefix = (mFilterOutputs[filterId][i + 4] << 16) |
+ (mFilterOutputs[filterId][i + 5] << 8) |
+ mFilterOutputs[filterId][i + 6];
+ ALOGD("[Demux] prefix %d", prefix);
+ if (prefix == 0x000001) {
+                // TODO: handle multiple PES filters
+ mPesSizeLeft =
+ (mFilterOutputs[filterId][i + 8] << 8) | mFilterOutputs[filterId][i + 9];
+ mPesSizeLeft += 6;
+ ALOGD("[Demux] pes data length %d", mPesSizeLeft);
+ } else {
+ continue;
+ }
+ }
+
+ int endPoint = min(184, mPesSizeLeft);
+ // append data and check size
+ vector<uint8_t>::const_iterator first = mFilterOutputs[filterId].begin() + i + 4;
+ vector<uint8_t>::const_iterator last = mFilterOutputs[filterId].begin() + i + 4 + endPoint;
+ mPesOutput.insert(mPesOutput.end(), first, last);
+        // if the size does not match yet, continue accumulating
+ mPesSizeLeft -= endPoint;
+ if (mPesSizeLeft > 0) {
+ continue;
+ }
+        // once the size matches, create the event
+ if (!writeDataToFilterMQ(mPesOutput, filterId)) {
+ mFilterOutputs[filterId].clear();
+ return Result::INVALID_STATE;
+ }
+ maySendFilterStatusCallback(filterId);
+ DemuxFilterPesEvent pesEvent;
+ pesEvent = {
+ // temp dump meta data
+ .streamId = mPesOutput[3],
+ .dataLength = static_cast<uint16_t>(mPesOutput.size()),
+ };
+ ALOGD("[Demux] assembled pes data length %d", pesEvent.dataLength);
+
+ int size = mFilterEvents[filterId].events.size();
+ mFilterEvents[filterId].events.resize(size + 1);
+ mFilterEvents[filterId].events[size].pes(pesEvent);
+ mPesOutput.clear();
+ }
+
+ mFilterOutputs[filterId].clear();
+
+ return Result::SUCCESS;
+}
+
+Result Demux::startTsFilterHandler() {
+ // TODO handle starting TS filter
+ return Result::SUCCESS;
+}
+
+Result Demux::startMediaFilterHandler(uint32_t filterId) {
+ DemuxFilterMediaEvent mediaEvent;
+ mediaEvent = {
+ // temp dump meta data
+ .pts = 0,
+ .dataLength = 530,
+ .secureMemory = nullptr,
+ };
+ mFilterEvents[filterId].events.resize(1);
+ mFilterEvents[filterId].events[0].media() = mediaEvent;
+
+ mFilterOutputs[filterId].clear();
+ // TODO handle write FQM for media stream
+ return Result::SUCCESS;
+}
+
+Result Demux::startRecordFilterHandler(uint32_t filterId) {
+ DemuxFilterRecordEvent recordEvent;
+ recordEvent = {
+ // temp dump meta data
+ .tpid = 0,
+ .packetNum = 0,
+ };
+ recordEvent.indexMask.tsIndexMask() = 0x01;
+ mFilterEvents[filterId].events.resize(1);
+ mFilterEvents[filterId].events[0].ts() = recordEvent;
+
+ mFilterOutputs[filterId].clear();
+ return Result::SUCCESS;
+}
+
+Result Demux::startPcrFilterHandler() {
+ // TODO handle starting PCR filter
+ return Result::SUCCESS;
+}
+
+bool Demux::createFilterMQ(uint32_t bufferSize, uint32_t filterId) {
+ ALOGV("%s", __FUNCTION__);
+
+ // Create a synchronized FMQ that supports blocking read/write
+ std::unique_ptr<FilterMQ> tmpFilterMQ =
+ std::unique_ptr<FilterMQ>(new (std::nothrow) FilterMQ(bufferSize, true));
+ if (!tmpFilterMQ->isValid()) {
+ ALOGW("Failed to create FMQ of filter with id: %d", filterId);
+ return false;
+ }
+
+ mFilterMQs[filterId] = std::move(tmpFilterMQ);
+
+ EventFlag* filterEventFlag;
+ if (EventFlag::createEventFlag(mFilterMQs[filterId]->getEventFlagWord(), &filterEventFlag) !=
+ OK) {
+ return false;
+ }
+ mFilterEventFlags[filterId] = filterEventFlag;
+
+ return true;
+}
+
+bool Demux::writeSectionsAndCreateEvent(uint32_t filterId, vector<uint8_t> data) {
+ // TODO check how many sections has been read
+ std::lock_guard<std::mutex> lock(mFilterEventLock);
+ if (!writeDataToFilterMQ(data, filterId)) {
+ return false;
+ }
+ int size = mFilterEvents[filterId].events.size();
+ mFilterEvents[filterId].events.resize(size + 1);
+ DemuxFilterSectionEvent secEvent;
+ secEvent = {
+ // temp dump meta data
+ .tableId = 0,
+ .version = 1,
+ .sectionNum = 1,
+ .dataLength = static_cast<uint16_t>(data.size()),
+ };
+ mFilterEvents[filterId].events[size].section(secEvent);
return true;
}
@@ -269,116 +614,108 @@
return false;
}
-Result Demux::startSectionFilterHandler(DemuxFilterEvent event) {
- struct ThreadArgs* threadArgs = (struct ThreadArgs*)malloc(sizeof(struct ThreadArgs));
- threadArgs->user = this;
- threadArgs->event = &event;
+bool Demux::readInputFMQ() {
+ // Read input data from the input FMQ
+ int size = mInputMQ->availableToRead();
+ int inputPacketSize = mInputSettings.packetSize;
+ vector<uint8_t> dataOutputBuffer;
+ dataOutputBuffer.resize(inputPacketSize);
- pthread_create(&mThreadId, NULL, __threadLoop, (void*)threadArgs);
- pthread_setname_np(mThreadId, "demux_filter_waiting_loop");
-
- return Result::SUCCESS;
-}
-
-Result Demux::startPesFilterHandler(DemuxFilterEvent& event) {
- // TODO generate multiple events in one event callback
- DemuxFilterPesEvent pesEvent;
- pesEvent = {
- // temp dump meta data
- .streamId = 0,
- .dataLength = 530,
- };
- event.events.resize(1);
- event.events[0].pes(pesEvent);
- /*pthread_create(&mThreadId, NULL, __threadLoop, this);
- pthread_setname_np(mThreadId, "demux_section_filter_waiting_loop");*/
- if (!writeDataToFilterMQ(fakeDataInputBuffer, event.filterId)) {
- return Result::INVALID_STATE;
+ // Dispatch the packet to the PID matching filter output buffer
+ for (int i = 0; i < size / inputPacketSize; i++) {
+ if (!mInputMQ->read(dataOutputBuffer.data(), inputPacketSize)) {
+ return false;
+ }
+ startTsFilter(dataOutputBuffer);
}
- if (mDemuxCallbacks[event.filterId] == nullptr) {
- return Result::NOT_INITIALIZED;
+ return true;
+}
+
+void Demux::startTsFilter(vector<uint8_t> data) {
+ set<uint32_t>::iterator it;
+ for (it = mUsedFilterIds.begin(); it != mUsedFilterIds.end(); it++) {
+ uint16_t pid = ((data[1] & 0x1f) << 8) | ((data[2] & 0xff));
+ ALOGW("start ts filter pid: %d", pid);
+ if (pid == mFilterPids[*it]) {
+ mFilterOutputs[*it].insert(mFilterOutputs[*it].end(), data.begin(), data.end());
+ }
+ }
+}
+
+bool Demux::startFilterDispatcher() {
+ Result result;
+ set<uint32_t>::iterator it;
+
+ // Handle the output data per filter type
+ for (it = mUsedFilterIds.begin(); it != mUsedFilterIds.end(); it++) {
+ switch (mFilterEvents[*it].filterType) {
+ case DemuxFilterType::SECTION:
+ result = startSectionFilterHandler(*it);
+ break;
+ case DemuxFilterType::PES:
+ result = startPesFilterHandler(*it);
+ break;
+ case DemuxFilterType::TS:
+ result = startTsFilterHandler();
+ break;
+ case DemuxFilterType::AUDIO:
+ case DemuxFilterType::VIDEO:
+ result = startMediaFilterHandler(*it);
+ break;
+ case DemuxFilterType::RECORD:
+ result = startRecordFilterHandler(*it);
+ break;
+ case DemuxFilterType::PCR:
+ result = startPcrFilterHandler();
+ break;
+ default:
+ return false;
+ }
}
- mDemuxCallbacks[event.filterId]->onFilterEvent(event);
- return Result::SUCCESS;
+ return result == Result::SUCCESS;
}
-Result Demux::startTsFilterHandler() {
- // TODO handle starting TS filter
- return Result::SUCCESS;
-}
-
-Result Demux::startMediaFilterHandler(DemuxFilterEvent& event) {
- DemuxFilterMediaEvent mediaEvent;
- mediaEvent = {
- // temp dump meta data
- .pts = 0,
- .dataLength = 530,
- .secureMemory = nullptr,
- };
- event.events.resize(1);
- event.events[0].media() = mediaEvent;
- // TODO handle write FQM for media stream
- return Result::SUCCESS;
-}
-
-Result Demux::startRecordFilterHandler(DemuxFilterEvent& event) {
- DemuxFilterRecordEvent recordEvent;
- recordEvent = {
- // temp dump meta data
- .tpid = 0,
- .packetNum = 0,
- };
- recordEvent.indexMask.tsIndexMask() = 0x01;
- event.events.resize(1);
- event.events[0].ts() = recordEvent;
- return Result::SUCCESS;
-}
-
-Result Demux::startPcrFilterHandler() {
- // TODO handle starting PCR filter
- return Result::SUCCESS;
-}
-
-void* Demux::__threadLoop(void* threadArg) {
+void* Demux::__threadLoopFilter(void* threadArg) {
Demux* const self = static_cast<Demux*>(((struct ThreadArgs*)threadArg)->user);
- self->filterThreadLoop(((struct ThreadArgs*)threadArg)->event);
+ self->filterThreadLoop(((struct ThreadArgs*)threadArg)->filterId);
return 0;
}
-void Demux::filterThreadLoop(DemuxFilterEvent* event) {
- uint32_t filterId = event->filterId;
- ALOGD("[Demux] filter %d threadLoop start.", filterId);
- mThreadRunning[filterId] = true;
+void* Demux::__threadLoopInput(void* user) {
+ Demux* const self = static_cast<Demux*>(user);
+ self->inputThreadLoop();
+ return 0;
+}
- while (mThreadRunning[filterId]) {
+void Demux::filterThreadLoop(uint32_t filterId) {
+ ALOGD("[Demux] filter %d threadLoop start.", filterId);
+ std::lock_guard<std::mutex> lock(mFilterThreadLock);
+ mFilterThreadRunning[filterId] = true;
+
+    // For the first filter output, the implementation needs to send the filter
+    // event callback without waiting for DATA_CONSUMED, to initialize the process.
+ while (mFilterThreadRunning[filterId]) {
+ if (mFilterEvents[filterId].events.size() == 0) {
+ ALOGD("[Demux] wait for filter data output.");
+ usleep(1000 * 1000);
+ continue;
+ }
+        // After a successful write, send a callback and wait for the read to be done
+ mFilterCallbacks[filterId]->onFilterEvent(mFilterEvents[filterId]);
+ mFilterEvents[filterId].events.resize(0);
+ mFilterStatus[filterId] = DemuxFilterStatus::DATA_READY;
+ mFilterCallbacks[filterId]->onFilterStatus(filterId, mFilterStatus[filterId]);
+ break;
+ }
+
+ while (mFilterThreadRunning[filterId]) {
uint32_t efState = 0;
        // We do not wait for the last round of written data to be read to finish the thread
// because the VTS can verify the reading itself.
for (int i = 0; i < SECTION_WRITE_COUNT; i++) {
- DemuxFilterEvent filterEvent{
- .filterId = filterId,
- .filterType = event->filterType,
- };
- if (!writeSectionsAndCreateEvent(filterEvent, 2)) {
- ALOGD("[Demux] filter %d fails to write into FMQ. Ending thread", filterId);
- break;
- }
- mFilterWriteCount[filterId]++;
- if (mDemuxCallbacks[filterId] == nullptr) {
- ALOGD("[Demux] filter %d does not hava callback. Ending thread", filterId);
- break;
- }
- // After successfully write, send a callback and wait for the read to be done
- mDemuxCallbacks[filterId]->onFilterEvent(filterEvent);
- // We do not wait for the last read to be done
- // VTS can verify the read result itself.
- if (i == SECTION_WRITE_COUNT - 1) {
- ALOGD("[Demux] filter %d writing done. Ending thread", filterId);
- break;
- }
- while (mThreadRunning[filterId]) {
+ while (mFilterThreadRunning[filterId]) {
status_t status = mFilterEventFlags[filterId]->wait(
static_cast<uint32_t>(DemuxQueueNotifyBits::DATA_CONSUMED), &efState,
WAIT_TIMEOUT, true /* retry on spurious wake */);
@@ -388,15 +725,184 @@
}
break;
}
- }
- mFilterWriteCount[filterId] = 0;
- mThreadRunning[filterId] = false;
+ if (mFilterCallbacks[filterId] == nullptr) {
+ ALOGD("[Demux] filter %d does not hava callback. Ending thread", filterId);
+ break;
+ }
+
+ maySendFilterStatusCallback(filterId);
+
+ while (mFilterThreadRunning[filterId]) {
+ std::lock_guard<std::mutex> lock(mFilterEventLock);
+ if (mFilterEvents[filterId].events.size() == 0) {
+ continue;
+ }
+                // After a successful write, send a callback and wait for the read to be done
+ mFilterCallbacks[filterId]->onFilterEvent(mFilterEvents[filterId]);
+ mFilterEvents[filterId].events.resize(0);
+ break;
+ }
+ // We do not wait for the last read to be done
+ // VTS can verify the read result itself.
+ if (i == SECTION_WRITE_COUNT - 1) {
+ ALOGD("[Demux] filter %d writing done. Ending thread", filterId);
+ break;
+ }
+ }
+ mFilterThreadRunning[filterId] = false;
}
ALOGD("[Demux] filter thread ended.");
}
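+// Illustrative sketch (not part of this change): the consuming side of the
+// handshake above is expected to drain the filter FMQ and then signal
+// DATA_CONSUMED on the same EventFlag, e.g.:
+//
+//     std::vector<uint8_t> buffer(filterMQ->availableToRead());
+//     filterMQ->read(buffer.data(), buffer.size());
+//     filterEventFlag->wake(
+//             static_cast<uint32_t>(DemuxQueueNotifyBits::DATA_CONSUMED));
+//
+// filterMQ and filterEventFlag are placeholders for the client-side queue and
+// event flag created from the descriptor returned by the demux.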
+void Demux::inputThreadLoop() {
+ ALOGD("[Demux] input threadLoop start.");
+ std::lock_guard<std::mutex> lock(mInputThreadLock);
+ mInputThreadRunning = true;
+
+ while (mInputThreadRunning) {
+ uint32_t efState = 0;
+ status_t status =
+ mInputEventFlag->wait(static_cast<uint32_t>(DemuxQueueNotifyBits::DATA_READY),
+ &efState, WAIT_TIMEOUT, true /* retry on spurious wake */);
+ if (status != OK) {
+ ALOGD("[Demux] wait for data ready on the input FMQ");
+ continue;
+ }
+        // The current implementation filters the data and writes it into the filter
+        // FMQ immediately after receiving DATA_READY from the VTS/framework
+ if (!readInputFMQ() || !startFilterDispatcher()) {
+ ALOGD("[Demux] input data failed to be filtered. Ending thread");
+ break;
+ }
+
+ maySendInputStatusCallback();
+ }
+
+ mInputThreadRunning = false;
+ ALOGD("[Demux] input thread ended.");
+}
+
+void Demux::maySendInputStatusCallback() {
+ std::lock_guard<std::mutex> lock(mInputStatusLock);
+ int availableToRead = mInputMQ->availableToRead();
+ int availableToWrite = mInputMQ->availableToWrite();
+
+ DemuxInputStatus newStatus =
+ checkInputStatusChange(availableToWrite, availableToRead, mInputSettings.highThreshold,
+ mInputSettings.lowThreshold);
+ if (mIntputStatus != newStatus) {
+ mInputCallback->onInputStatus(newStatus);
+ mIntputStatus = newStatus;
+ }
+}
+
+void Demux::maySendFilterStatusCallback(uint32_t filterId) {
+ std::lock_guard<std::mutex> lock(mFilterStatusLock);
+ int availableToRead = mFilterMQs[filterId]->availableToRead();
+ int availableToWrite = mFilterMQs[filterId]->availableToWrite();
+ int fmqSize = mFilterMQs[filterId]->getQuantumCount();
+
+ DemuxFilterStatus newStatus =
+ checkFilterStatusChange(filterId, availableToWrite, availableToRead,
+ ceil(fmqSize * 0.75), ceil(fmqSize * 0.25));
+ if (mFilterStatus[filterId] != newStatus) {
+ mFilterCallbacks[filterId]->onFilterStatus(filterId, newStatus);
+ mFilterStatus[filterId] = newStatus;
+ }
+}
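+
+// Example (illustrative, not part of the HAL API): for a filter FMQ whose
+// getQuantumCount() is 1024, the thresholds above are ceil(1024 * 0.75) = 768
+// and ceil(1024 * 0.25) = 256, so HIGH_WATER is reported once more than 768
+// bytes are pending and LOW_WATER once fewer than 256 bytes remain unread.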
+
+DemuxInputStatus Demux::checkInputStatusChange(uint32_t availableToWrite, uint32_t availableToRead,
+ uint32_t highThreshold, uint32_t lowThreshold) {
+ if (availableToWrite == 0) {
+ return DemuxInputStatus::SPACE_FULL;
+ } else if (availableToRead > highThreshold) {
+ return DemuxInputStatus::SPACE_ALMOST_FULL;
+    } else if (availableToRead == 0) {
+        return DemuxInputStatus::SPACE_EMPTY;
+    } else if (availableToRead < lowThreshold) {
+        return DemuxInputStatus::SPACE_ALMOST_EMPTY;
+ }
+ return mIntputStatus;
+}
+
+DemuxFilterStatus Demux::checkFilterStatusChange(uint32_t filterId, uint32_t availableToWrite,
+ uint32_t availableToRead, uint32_t highThreshold,
+ uint32_t lowThreshold) {
+ if (availableToWrite == 0) {
+ return DemuxFilterStatus::OVERFLOW;
+ } else if (availableToRead > highThreshold) {
+ return DemuxFilterStatus::HIGH_WATER;
+ } else if (availableToRead < lowThreshold) {
+ return DemuxFilterStatus::LOW_WATER;
+ }
+ return mFilterStatus[filterId];
+}
+
+Result Demux::startBroadcastInputLoop() {
+ pthread_create(&mBroadcastInputThread, NULL, __threadLoopBroadcast, this);
+ pthread_setname_np(mBroadcastInputThread, "broadcast_input_thread");
+
+ return Result::SUCCESS;
+}
+
+void* Demux::__threadLoopBroadcast(void* user) {
+ Demux* const self = static_cast<Demux*>(user);
+ self->broadcastInputThreadLoop();
+ return 0;
+}
+
+void Demux::broadcastInputThreadLoop() {
+ std::lock_guard<std::mutex> lock(mBroadcastInputThreadLock);
+ mBroadcastInputThreadRunning = true;
+ mKeepFetchingDataFromFrontend = true;
+
+ // open the stream and get its length
+ std::ifstream inputData(mFrontendSourceFile, std::ifstream::binary);
+ // TODO take the packet size from the frontend setting
+ int packetSize = 188;
+ int writePacketAmount = 6;
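+    // 188 bytes is the standard MPEG-TS packet length (sync byte 0x47 at offset 0),
+    // so each pass below consumes 6 * 188 = 1128 bytes of the test stream.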
+ char* buffer = new char[packetSize];
+ ALOGW("[Demux] broadcast input thread loop start %s", mFrontendSourceFile.c_str());
+ if (!inputData.is_open()) {
+ mBroadcastInputThreadRunning = false;
+ ALOGW("[Demux] Error %s", strerror(errno));
+ }
+
+ while (mBroadcastInputThreadRunning) {
+ // move the stream pointer for packet size * 6 every read until the end
+ while (mKeepFetchingDataFromFrontend) {
+ for (int i = 0; i < writePacketAmount; i++) {
+ inputData.read(buffer, packetSize);
+ if (!inputData) {
+ mBroadcastInputThreadRunning = false;
+ break;
+ }
+ // filter and dispatch filter output
+ vector<uint8_t> byteBuffer;
+ byteBuffer.resize(packetSize);
+ for (int index = 0; index < byteBuffer.size(); index++) {
+ byteBuffer[index] = static_cast<uint8_t>(buffer[index]);
+ }
+ startTsFilter(byteBuffer);
+ }
+ startFilterDispatcher();
+ sleep(1);
+ }
+ }
+
+ ALOGW("[Demux] Broadcast Input thread end.");
+ delete[] buffer;
+ inputData.close();
+}
+
+void Demux::stopBroadcastInput() {
+ mKeepFetchingDataFromFrontend = false;
+ mBroadcastInputThreadRunning = false;
+ std::lock_guard<std::mutex> lock(mBroadcastInputThreadLock);
+}
+
} // namespace implementation
} // namespace V1_0
} // namespace tuner
diff --git a/tv/tuner/1.0/default/Demux.h b/tv/tuner/1.0/default/Demux.h
index 8b00266..ba0b9b0 100644
--- a/tv/tuner/1.0/default/Demux.h
+++ b/tv/tuner/1.0/default/Demux.h
@@ -19,6 +19,10 @@
#include <android/hardware/tv/tuner/1.0/IDemux.h>
#include <fmq/MessageQueue.h>
+#include <math.h>
+#include <set>
+#include "Frontend.h"
+#include "Tuner.h"
using namespace std;
@@ -39,9 +43,14 @@
using FilterMQ = MessageQueue<uint8_t, kSynchronizedReadWrite>;
+class Tuner;
+class Frontend;
+
class Demux : public IDemux {
public:
- Demux(uint32_t demuxId);
+ Demux(uint32_t demuxId, sp<Tuner> tuner);
+
+ ~Demux();
virtual Return<Result> setFrontendDataSource(uint32_t frontendId) override;
@@ -68,8 +77,69 @@
virtual Return<void> getAvSyncTime(AvSyncHwId avSyncHwId, getAvSyncTime_cb _hidl_cb) override;
+ virtual Return<Result> addInput(uint32_t bufferSize, const sp<IDemuxCallback>& cb) override;
+
+ virtual Return<void> getInputQueueDesc(getInputQueueDesc_cb _hidl_cb) override;
+
+ virtual Return<Result> configureInput(const DemuxInputSettings& settings) override;
+
+ virtual Return<Result> startInput() override;
+
+ virtual Return<Result> stopInput() override;
+
+ virtual Return<Result> flushInput() override;
+
+ virtual Return<Result> removeInput() override;
+
+ virtual Return<Result> addOutput(uint32_t bufferSize, const sp<IDemuxCallback>& cb) override;
+
+ virtual Return<void> getOutputQueueDesc(getOutputQueueDesc_cb _hidl_cb) override;
+
+ virtual Return<Result> configureOutput(const DemuxOutputSettings& settings) override;
+
+ virtual Return<Result> attachOutputFilter(uint32_t filterId) override;
+
+ virtual Return<Result> detachOutputFilter(uint32_t filterId) override;
+
+ virtual Return<Result> startOutput() override;
+
+ virtual Return<Result> stopOutput() override;
+
+ virtual Return<Result> flushOutput() override;
+
+ virtual Return<Result> removeOutput() override;
+
+    // Functions that interact with the Tuner Service
+ void stopBroadcastInput();
+
private:
- virtual ~Demux();
+ // Tuner service
+ sp<Tuner> mTunerService;
+
+ // Frontend source
+ sp<Frontend> mFrontend;
+ string mFrontendSourceFile;
+
+ // A struct that passes the arguments to a newly created filter thread
+ struct ThreadArgs {
+ Demux* user;
+ uint32_t filterId;
+ };
+
+ /**
+ * Filter handlers to handle the data filtering.
+     * They are also responsible for writing the filtered output into the filter FMQ
+     * and updating the filterEvent bound to the same filterId.
+ */
+ Result startSectionFilterHandler(uint32_t filterId);
+ Result startPesFilterHandler(uint32_t filterId);
+ Result startTsFilterHandler();
+ Result startMediaFilterHandler(uint32_t filterId);
+ Result startRecordFilterHandler(uint32_t filterId);
+ Result startPcrFilterHandler();
+ Result startFilterLoop(uint32_t filterId);
+ Result startBroadcastInputLoop();
+
/**
     * To create a FilterMQ with the next available Filter ID.
* Creating Event Flag at the same time.
@@ -77,60 +147,118 @@
*
     * Return false if any of the above processes fails.
*/
- bool createAndSaveMQ(uint32_t bufferSize, uint32_t filterId);
+ bool createFilterMQ(uint32_t bufferSize, uint32_t filterId);
+ bool createMQ(FilterMQ* queue, EventFlag* eventFlag, uint32_t bufferSize);
void deleteEventFlag();
bool writeDataToFilterMQ(const std::vector<uint8_t>& data, uint32_t filterId);
- Result startSectionFilterHandler(DemuxFilterEvent event);
- Result startPesFilterHandler(DemuxFilterEvent& event);
- Result startTsFilterHandler();
- Result startMediaFilterHandler(DemuxFilterEvent& event);
- Result startRecordFilterHandler(DemuxFilterEvent& event);
- Result startPcrFilterHandler();
- bool writeSectionsAndCreateEvent(DemuxFilterEvent& event, uint32_t sectionNum);
- void filterThreadLoop(DemuxFilterEvent* event);
- static void* __threadLoop(void* data);
+ bool readDataFromMQ();
+ bool writeSectionsAndCreateEvent(uint32_t filterId, vector<uint8_t> data);
+ void maySendInputStatusCallback();
+ void maySendFilterStatusCallback(uint32_t filterId);
+ DemuxInputStatus checkInputStatusChange(uint32_t availableToWrite, uint32_t availableToRead,
+ uint32_t highThreshold, uint32_t lowThreshold);
+ DemuxFilterStatus checkFilterStatusChange(uint32_t filterId, uint32_t availableToWrite,
+ uint32_t availableToRead, uint32_t highThreshold,
+ uint32_t lowThreshold);
+ /**
+ * A dispatcher to read and dispatch input data to all the started filters.
+ * Each filter handler handles the data filtering/output writing/filterEvent updating.
+ */
+ bool readInputFMQ();
+ void startTsFilter(vector<uint8_t> data);
+ bool startFilterDispatcher();
+ static void* __threadLoopFilter(void* data);
+ static void* __threadLoopInput(void* user);
+ static void* __threadLoopBroadcast(void* user);
+ void filterThreadLoop(uint32_t filterId);
+ void inputThreadLoop();
+ void broadcastInputThreadLoop();
uint32_t mDemuxId;
- uint32_t mSourceFrontendId;
/**
- * Record the last used filer id. Initial value is -1.
+ * Record the last used filter id. Initial value is -1.
* Filter Id starts with 0.
*/
uint32_t mLastUsedFilterId = -1;
/**
+ * Record all the used filter Ids.
+ * Any removed filter id should be removed from this set.
+ */
+ set<uint32_t> mUsedFilterIds;
+ /**
+ * Record all the unused filter Ids within mLastUsedFilterId.
+ * Removed filter Id should be added into this set.
+ * When this set is not empty, ids here should be allocated first
+ * and added into usedFilterIds.
+ */
+ set<uint32_t> mUnusedFilterIds;
+ /**
* A list of created FilterMQ ptrs.
* The array number is the filter ID.
*/
+ vector<uint16_t> mFilterPids;
+ vector<vector<uint8_t>> mFilterOutputs;
vector<unique_ptr<FilterMQ>> mFilterMQs;
- vector<DemuxFilterType> mFilterTypes;
vector<EventFlag*> mFilterEventFlags;
+ vector<DemuxFilterEvent> mFilterEvents;
+ unique_ptr<FilterMQ> mInputMQ;
+ unique_ptr<FilterMQ> mOutputMQ;
+ EventFlag* mInputEventFlag;
+ EventFlag* mOutputEventFlag;
/**
* Demux callbacks used on filter events or IO buffer status
*/
- vector<sp<IDemuxCallback>> mDemuxCallbacks;
- /**
- * How many times a specific filter has written since started
- */
- vector<uint16_t> mFilterWriteCount;
- pthread_t mThreadId = 0;
+ vector<sp<IDemuxCallback>> mFilterCallbacks;
+ sp<IDemuxCallback> mInputCallback;
+ sp<IDemuxCallback> mOutputCallback;
+ bool mInputConfigured = false;
+ bool mOutputConfigured = false;
+ DemuxInputSettings mInputSettings;
+ DemuxOutputSettings mOutputSettings;
+
+ // Thread handlers
+ pthread_t mInputThread;
+ pthread_t mOutputThread;
+ pthread_t mBroadcastInputThread;
+ vector<pthread_t> mFilterThreads;
+
+ // FMQ status local records
+ DemuxInputStatus mIntputStatus;
+ vector<DemuxFilterStatus> mFilterStatus;
/**
* If a specific filter's writing loop is still running
*/
- vector<bool> mThreadRunning;
+ vector<bool> mFilterThreadRunning;
+ bool mInputThreadRunning;
+ bool mBroadcastInputThreadRunning;
+ bool mKeepFetchingDataFromFrontend;
/**
* Lock to protect writes to the FMQs
*/
std::mutex mWriteLock;
/**
+ * Lock to protect writes to the filter event
+ */
+    // TODO give each filter its own event lock
+ std::mutex mFilterEventLock;
+ /**
+ * Lock to protect writes to the input status
+ */
+ std::mutex mInputStatusLock;
+ std::mutex mFilterStatusLock;
+ std::mutex mBroadcastInputThreadLock;
+ std::mutex mFilterThreadLock;
+ std::mutex mInputThreadLock;
+ /**
* How many times a filter should write
* TODO make this dynamic/random/can take as a parameter
*/
const uint16_t SECTION_WRITE_COUNT = 10;
- // A struct that passes the arguments to a newly created filter thread
- struct ThreadArgs {
- Demux* user;
- DemuxFilterEvent* event;
- };
+
+    // Temporary handling for a single PES filter
+    // TODO handle multiple PES filters
+ int mPesSizeLeft = 0;
+ vector<uint8_t> mPesOutput;
};
} // namespace implementation
diff --git a/tv/tuner/1.0/default/Frontend.cpp b/tv/tuner/1.0/default/Frontend.cpp
index 3dcc2b1..1e07edd 100644
--- a/tv/tuner/1.0/default/Frontend.cpp
+++ b/tv/tuner/1.0/default/Frontend.cpp
@@ -27,14 +27,10 @@
namespace V1_0 {
namespace implementation {
-Frontend::Frontend() {
- // Init callback to nullptr
- mCallback = nullptr;
-}
-
-Frontend::Frontend(FrontendType type, FrontendId id) {
+Frontend::Frontend(FrontendType type, FrontendId id, sp<Tuner> tuner) {
mType = type;
mId = id;
+ mTunerService = tuner;
// Init callback to nullptr
mCallback = nullptr;
}
@@ -67,13 +63,52 @@
return Result::INVALID_STATE;
}
- mCallback->onEvent(FrontendEventType::NO_SIGNAL);
+    // TODO dynamically assign the frontend source file
+ mSourceStreamFile = FRONTEND_STREAM_FILE;
+
+ mCallback->onEvent(FrontendEventType::LOCKED);
return Result::SUCCESS;
}
Return<Result> Frontend::stopTune() {
ALOGV("%s", __FUNCTION__);
+ mTunerService->frontendStopTune(mId);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Frontend::scan(const FrontendSettings& /* settings */, FrontendScanType /* type */) {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Frontend::stopScan() {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<void> Frontend::getStatus(const hidl_vec<FrontendStatusType>& /* statusTypes */,
+ getStatus_cb _hidl_cb) {
+ ALOGV("%s", __FUNCTION__);
+
+ vector<FrontendStatus> statuses;
+ _hidl_cb(Result::SUCCESS, statuses);
+
+ return Void();
+}
+
+Return<Result> Frontend::setLna(bool /* bEnable */) {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Frontend::setLnb(uint32_t /* lnb */) {
+ ALOGV("%s", __FUNCTION__);
+
return Result::SUCCESS;
}
@@ -85,6 +120,10 @@
return mId;
}
+string Frontend::getSourceFile() {
+ return mSourceStreamFile;
+}
+
} // namespace implementation
} // namespace V1_0
} // namespace tuner
diff --git a/tv/tuner/1.0/default/Frontend.h b/tv/tuner/1.0/default/Frontend.h
index f77a0d8..07fa7b9 100644
--- a/tv/tuner/1.0/default/Frontend.h
+++ b/tv/tuner/1.0/default/Frontend.h
@@ -18,7 +18,9 @@
#define ANDROID_HARDWARE_TV_TUNER_V1_0_FRONTEND_H_
#include <android/hardware/tv/tuner/1.0/IFrontend.h>
-#include <android/hardware/tv/tuner/1.0/ITuner.h>
+#include <fstream>
+#include <iostream>
+#include "Tuner.h"
using namespace std;
@@ -35,10 +37,11 @@
using ::android::hardware::tv::tuner::V1_0::IFrontendCallback;
using ::android::hardware::tv::tuner::V1_0::Result;
+class Tuner;
+
class Frontend : public IFrontend {
public:
- Frontend();
- Frontend(FrontendType type, FrontendId id);
+ Frontend(FrontendType type, FrontendId id, sp<Tuner> tuner);
virtual Return<Result> close() override;
@@ -48,15 +51,33 @@
virtual Return<Result> stopTune() override;
+ virtual Return<Result> scan(const FrontendSettings& settings, FrontendScanType type) override;
+
+ virtual Return<Result> stopScan() override;
+
+ virtual Return<void> getStatus(const hidl_vec<FrontendStatusType>& statusTypes,
+ getStatus_cb _hidl_cb) override;
+
+ virtual Return<Result> setLna(bool bEnable) override;
+
+ virtual Return<Result> setLnb(uint32_t lnb) override;
+
FrontendType getFrontendType();
FrontendId getFrontendId();
+ string getSourceFile();
+
private:
virtual ~Frontend();
sp<IFrontendCallback> mCallback;
+ sp<Tuner> mTunerService;
FrontendType mType = FrontendType::UNDEFINED;
FrontendId mId = 0;
+
+ const string FRONTEND_STREAM_FILE = "/vendor/etc/test1.ts";
+ string mSourceStreamFile;
+ std::ifstream mFrontendData;
};
} // namespace implementation
diff --git a/tv/tuner/1.0/default/Lnb.cpp b/tv/tuner/1.0/default/Lnb.cpp
new file mode 100644
index 0000000..1446f7f
--- /dev/null
+++ b/tv/tuner/1.0/default/Lnb.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "android.hardware.tv.tuner@1.0-Lnb"
+
+#include "Lnb.h"
+#include <utils/Log.h>
+
+namespace android {
+namespace hardware {
+namespace tv {
+namespace tuner {
+namespace V1_0 {
+namespace implementation {
+
+Lnb::Lnb() {}
+
+Lnb::~Lnb() {}
+
+Return<Result> Lnb::setVoltage(FrontendLnbVoltage /* voltage */) {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Lnb::setTone(FrontendLnbTone /* tone */) {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Lnb::setSatellitePosition(FrontendLnbPosition /* position */) {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Lnb::sendDiseqcMessage(const hidl_vec<uint8_t>& /* diseqcMessage */) {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+Return<Result> Lnb::close() {
+ ALOGV("%s", __FUNCTION__);
+
+ return Result::SUCCESS;
+}
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace tuner
+} // namespace tv
+} // namespace hardware
+} // namespace android
\ No newline at end of file
diff --git a/tv/tuner/1.0/default/Lnb.h b/tv/tuner/1.0/default/Lnb.h
new file mode 100644
index 0000000..4c251f7
--- /dev/null
+++ b/tv/tuner/1.0/default/Lnb.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_TV_TUNER_V1_0_LNB_H_
+#define ANDROID_HARDWARE_TV_TUNER_V1_0_LNB_H_
+
+#include <android/hardware/tv/tuner/1.0/ILnb.h>
+#include <android/hardware/tv/tuner/1.0/ITuner.h>
+
+using namespace std;
+
+namespace android {
+namespace hardware {
+namespace tv {
+namespace tuner {
+namespace V1_0 {
+namespace implementation {
+
+using ::android::hardware::tv::tuner::V1_0::FrontendLnbPosition;
+using ::android::hardware::tv::tuner::V1_0::FrontendLnbTone;
+using ::android::hardware::tv::tuner::V1_0::FrontendLnbVoltage;
+using ::android::hardware::tv::tuner::V1_0::Result;
+
+class Lnb : public ILnb {
+ public:
+ Lnb();
+
+ virtual Return<Result> setVoltage(FrontendLnbVoltage voltage) override;
+
+ virtual Return<Result> setTone(FrontendLnbTone tone) override;
+
+ virtual Return<Result> setSatellitePosition(FrontendLnbPosition position) override;
+
+ virtual Return<Result> sendDiseqcMessage(const hidl_vec<uint8_t>& diseqcMessage) override;
+
+ virtual Return<Result> close() override;
+
+ private:
+ virtual ~Lnb();
+};
+
+} // namespace implementation
+} // namespace V1_0
+} // namespace tuner
+} // namespace tv
+} // namespace hardware
+} // namespace android
+
+#endif // ANDROID_HARDWARE_TV_TUNER_V1_0_LNB_H_
\ No newline at end of file
diff --git a/tv/tuner/1.0/default/Tuner.cpp b/tv/tuner/1.0/default/Tuner.cpp
index 68b3436..f86b28d 100644
--- a/tv/tuner/1.0/default/Tuner.cpp
+++ b/tv/tuner/1.0/default/Tuner.cpp
@@ -22,6 +22,7 @@
#include "Demux.h"
#include "Descrambler.h"
#include "Frontend.h"
+#include "Lnb.h"
namespace android {
namespace hardware {
@@ -37,14 +38,14 @@
// Array index matches their FrontendId in the default impl
mFrontendSize = 8;
mFrontends.resize(mFrontendSize);
- mFrontends[0] = new Frontend();
- mFrontends[1] = new Frontend(FrontendType::ATSC, 1);
- mFrontends[2] = new Frontend(FrontendType::DVBC, 2);
- mFrontends[3] = new Frontend(FrontendType::DVBS, 3);
- mFrontends[4] = new Frontend(FrontendType::DVBT, 4);
- mFrontends[5] = new Frontend(FrontendType::ISDBT, 5);
- mFrontends[6] = new Frontend(FrontendType::ANALOG, 6);
- mFrontends[7] = new Frontend(FrontendType::ATSC, 7);
+ mFrontends[0] = new Frontend(FrontendType::DVBT, 0, this);
+ mFrontends[1] = new Frontend(FrontendType::ATSC, 1, this);
+ mFrontends[2] = new Frontend(FrontendType::DVBC, 2, this);
+ mFrontends[3] = new Frontend(FrontendType::DVBS, 3, this);
+ mFrontends[4] = new Frontend(FrontendType::DVBT, 4, this);
+ mFrontends[5] = new Frontend(FrontendType::ISDBT, 5, this);
+ mFrontends[6] = new Frontend(FrontendType::ANALOG, 6, this);
+ mFrontends[7] = new Frontend(FrontendType::ATSC, 7, this);
}
Tuner::~Tuner() {}
@@ -80,12 +81,22 @@
DemuxId demuxId = mLastUsedId + 1;
mLastUsedId += 1;
- sp<IDemux> demux = new Demux(demuxId);
+ sp<Demux> demux = new Demux(demuxId, this);
+ mDemuxes[demuxId] = demux;
_hidl_cb(Result::SUCCESS, demuxId, demux);
return Void();
}
+Return<void> Tuner::getDemuxCaps(getDemuxCaps_cb _hidl_cb) {
+ ALOGV("%s", __FUNCTION__);
+
+ DemuxCapabilities caps;
+
+ _hidl_cb(Result::SUCCESS, caps);
+ return Void();
+}
+
Return<void> Tuner::openDescrambler(openDescrambler_cb _hidl_cb) {
ALOGV("%s", __FUNCTION__);
@@ -95,6 +106,52 @@
return Void();
}
+Return<void> Tuner::getFrontendInfo(FrontendId /* frontendId */, getFrontendInfo_cb _hidl_cb) {
+ ALOGV("%s", __FUNCTION__);
+
+ FrontendInfo info;
+
+ _hidl_cb(Result::SUCCESS, info);
+ return Void();
+}
+
+Return<void> Tuner::getLnbIds(getLnbIds_cb _hidl_cb) {
+ ALOGV("%s", __FUNCTION__);
+
+ vector<LnbId> lnbIds;
+
+ _hidl_cb(Result::SUCCESS, lnbIds);
+ return Void();
+}
+
+Return<void> Tuner::openLnbById(LnbId /* lnbId */, openLnbById_cb _hidl_cb) {
+ ALOGV("%s", __FUNCTION__);
+
+ sp<ILnb> lnb = new Lnb();
+
+ _hidl_cb(Result::SUCCESS, lnb);
+ return Void();
+}
+
+sp<Frontend> Tuner::getFrontendById(uint32_t frontendId) {
+ ALOGV("%s", __FUNCTION__);
+
+ return mFrontends[frontendId];
+}
+
+void Tuner::setFrontendAsDemuxSource(uint32_t frontendId, uint32_t demuxId) {
+ mFrontendToDemux[frontendId] = demuxId;
+}
+
+void Tuner::frontendStopTune(uint32_t frontendId) {
+ map<uint32_t, uint32_t>::iterator it = mFrontendToDemux.find(frontendId);
+ uint32_t demuxId;
+ if (it != mFrontendToDemux.end()) {
+ demuxId = it->second;
+ mDemuxes[demuxId]->stopBroadcastInput();
+ }
+}
+
} // namespace implementation
} // namespace V1_0
} // namespace tuner
diff --git a/tv/tuner/1.0/default/Tuner.h b/tv/tuner/1.0/default/Tuner.h
index 12e9594..96da257 100644
--- a/tv/tuner/1.0/default/Tuner.h
+++ b/tv/tuner/1.0/default/Tuner.h
@@ -18,6 +18,8 @@
#define ANDROID_HARDWARE_TV_TUNER_V1_0_TUNER_H_
#include <android/hardware/tv/tuner/1.0/ITuner.h>
+#include <map>
+#include "Demux.h"
#include "Frontend.h"
using namespace std;
@@ -29,6 +31,9 @@
namespace V1_0 {
namespace implementation {
+class Frontend;
+class Demux;
+
class Tuner : public ITuner {
public:
Tuner();
@@ -39,12 +44,29 @@
virtual Return<void> openDemux(openDemux_cb _hidl_cb) override;
+ virtual Return<void> getDemuxCaps(getDemuxCaps_cb _hidl_cb) override;
+
virtual Return<void> openDescrambler(openDescrambler_cb _hidl_cb) override;
+ virtual Return<void> getFrontendInfo(FrontendId frontendId,
+ getFrontendInfo_cb _hidl_cb) override;
+
+ virtual Return<void> getLnbIds(getLnbIds_cb _hidl_cb) override;
+
+ virtual Return<void> openLnbById(LnbId lnbId, openLnbById_cb _hidl_cb) override;
+
+ sp<Frontend> getFrontendById(uint32_t frontendId);
+
+ void setFrontendAsDemuxSource(uint32_t frontendId, uint32_t demuxId);
+
+ void frontendStopTune(uint32_t frontendId);
+
private:
virtual ~Tuner();
// Static mFrontends array to maintain local frontends information
vector<sp<Frontend>> mFrontends;
+ std::map<uint32_t, uint32_t> mFrontendToDemux;
+ std::map<uint32_t, sp<Demux>> mDemuxes;
// To maintain how many Frontends we have
int mFrontendSize;
// The last used demux id. Initial value is -1.
diff --git a/tv/tuner/1.0/default/service.cpp b/tv/tuner/1.0/default/service.cpp
index c360fcf..7bbc09e 100644
--- a/tv/tuner/1.0/default/service.cpp
+++ b/tv/tuner/1.0/default/service.cpp
@@ -21,7 +21,6 @@
#define LOG_TAG "android.hardware.tv.tuner@1.0-service"
#endif
-#include <binder/ProcessState.h>
#include <hidl/HidlTransportSupport.h>
#include <hidl/LegacySupport.h>
diff --git a/tv/tuner/1.0/types.hal b/tv/tuner/1.0/types.hal
index 94e70f5..890c1ed 100644
--- a/tv/tuner/1.0/types.hal
+++ b/tv/tuner/1.0/types.hal
@@ -41,42 +41,121 @@
enum FrontendType : uint32_t {
UNDEFINED = 0,
ANALOG,
+ /* Advanced Television Systems Committee (ATSC) Standard A/72. */
ATSC,
+ /* Advanced Television Systems Committee (ATSC 3.0) Standard A/300. */
+ ATSC3,
+ /**
+ * Digital Video Broadcasting - Cable
+ * DVB Cable Frontend Standard ETSI EN 300 468 V1.15.1.
+ */
DVBC,
+ /**
+ * Digital Video Broadcasting - Satellite
+ * DVB Satellite Frontend Standard ETSI EN 300 468 V1.15.1 and
+ * ETSI EN 302 307-2 V1.1.1.
+ */
DVBS,
+ /**
+ * Digital Video Broadcasting - Terrestrial
+ * DVB Terrestrial Frontend Standard ETSI EN 300 468 V1.15.1 and
+ * ETSI EN 302 755 V1.4.1.
+ */
DVBT,
+    /* Integrated Services Digital Broadcasting-Satellite (ISDB-S)
+     * ARIB STD-B20 is the technical document of ISDB-S.
+     */
+    ISDBS,
+    /* Integrated Services Digital Broadcasting-Satellite 3 (ISDB-S3)
+     * ARIB STD-B44 is the technical document of ISDB-S3.
+     */
+    ISDBS3,
+    /* Integrated Services Digital Broadcasting-Terrestrial (ISDB-T or SBTVD)
+     * ABNT NBR 15603 is the technical document of ISDB-T.
+ */
ISDBT,
};
/**
* Inner Forward Error Correction type as specified in ETSI EN 300 468 V1.15.1
- * It's a 4-bit field specifying the inner FEC scheme used according to the
- * table 35 in the spec.
+ * and ETSI EN 302 307-2 V1.1.1.
*/
@export
-enum FrontendInnerFec : uint32_t {
+enum FrontendInnerFec : uint64_t {
/* Not defined */
FEC_UNDEFINED = 0,
- /* 1/2 conv. code rate */
- FEC_1_2 = 1 << 0,
- /* 2/3 conv. code rate */
- FEC_2_3 = 1 << 1,
- /* 3/4 conv. code rate */
- FEC_3_4 = 1 << 2,
- /* 5/6 conv. code rate */
- FEC_5_6 = 1 << 3,
- /* 7/8 conv. code rate */
- FEC_7_8 = 1 << 4,
- /* 8/9 conv. code rate */
- FEC_8_9 = 1 << 5,
- /* 3/5 conv. code rate */
- FEC_3_5 = 1 << 6,
- /* 4/5 conv. code rate */
- FEC_4_5 = 1 << 7,
- /* 9/10 conv. code rate */
- FEC_9_10 = 1 << 8,
/* hardware is able to detect and set FEC automatically */
- FEC_AUTO = 1 << 9,
+ AUTO = 1 << 0,
+ /* 1/2 conv. code rate */
+ FEC_1_2 = 1 << 1,
+ /* 1/3 conv. code rate */
+ FEC_1_3 = 1 << 2,
+ /* 1/4 conv. code rate */
+ FEC_1_4 = 1 << 3,
+ /* 1/5 conv. code rate */
+ FEC_1_5 = 1 << 4,
+ /* 2/3 conv. code rate */
+ FEC_2_3 = 1 << 5,
+ /* 2/5 conv. code rate */
+ FEC_2_5 = 1 << 6,
+ /* 2/9 conv. code rate */
+ FEC_2_9 = 1 << 7,
+ /* 3/4 conv. code rate */
+ FEC_3_4 = 1 << 8,
+ /* 3/5 conv. code rate */
+ FEC_3_5 = 1 << 9,
+ /* 4/5 conv. code rate */
+ FEC_4_5 = 1 << 10,
+ /* 4/15 conv. code rate */
+ FEC_4_15 = 1 << 11,
+ /* 5/6 conv. code rate */
+ FEC_5_6 = 1 << 12,
+ /* 5/9 conv. code rate */
+ FEC_5_9 = 1 << 13,
+ /* 6/7 conv. code rate */
+ FEC_6_7 = 1 << 14,
+ /* 7/8 conv. code rate */
+ FEC_7_8 = 1 << 15,
+ /* 7/9 conv. code rate */
+ FEC_7_9 = 1 << 16,
+ /* 7/15 conv. code rate */
+ FEC_7_15 = 1 << 17,
+ /* 8/9 conv. code rate */
+ FEC_8_9 = 1 << 18,
+ /* 8/15 conv. code rate */
+ FEC_8_15 = 1 << 19,
+ /* 9/10 conv. code rate */
+ FEC_9_10 = 1 << 20,
+ /* 9/20 conv. code rate */
+ FEC_9_20 = 1 << 21,
+ /* 11/15 conv. code rate */
+ FEC_11_15 = 1 << 22,
+ /* 11/20 conv. code rate */
+ FEC_11_20 = 1 << 23,
+ /* 11/45 conv. code rate */
+ FEC_11_45 = 1 << 24,
+ /* 13/18 conv. code rate */
+ FEC_13_18 = 1 << 25,
+ /* 13/45 conv. code rate */
+ FEC_13_45 = 1 << 26,
+ /* 14/45 conv. code rate */
+ FEC_14_45 = 1 << 27,
+ /* 23/36 conv. code rate */
+ FEC_23_36 = 1 << 28,
+ /* 25/36 conv. code rate */
+ FEC_25_36 = 1 << 29,
+ /* 26/45 conv. code rate */
+ FEC_26_45 = 1 << 30,
+ /* 28/45 conv. code rate */
+ FEC_28_45 = 1 << 31,
+ /* 29/45 conv. code rate */
+ FEC_29_45 = 1 << 32,
+ /* 31/45 conv. code rate */
+ FEC_31_45 = 1 << 33,
+ /* 32/45 conv. code rate */
+ FEC_32_45 = 1 << 34,
+ /* 77/90 conv. code rate */
+ FEC_77_90 = 1 << 35,
};
/**
@@ -85,8 +164,10 @@
@export
enum FrontendAtscModulation : uint32_t {
UNDEFINED = 0,
- MOD_8VSB = 1 << 0,
- MOD_16VSB = 1 << 1,
+ /** hardware is able to detect and set modulation automatically */
+ AUTO = 1 << 0,
+ MOD_8VSB = 1 << 2,
+ MOD_16VSB = 1 << 3,
};
/**
@@ -99,21 +180,812 @@
};
/**
+ * Capabilities for ATSC Frontend.
+ */
+struct FrontendAtscCapabilities {
+ /** Modulation capability */
+ bitfield<FrontendAtscModulation> modulationCap;
+};
+
+/**
+ * Modulation Type for ATSC3.
+ */
+@export
+enum FrontendAtsc3Modulation : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set modulation automatically */
+ AUTO = 1 << 0,
+ MOD_QPSK = 1 << 1,
+ MOD_16QAM = 1 << 2,
+ MOD_64QAM = 1 << 3,
+ MOD_256QAM = 1 << 4,
+ MOD_1024QAM = 1 << 5,
+ MOD_4096QAM = 1 << 6,
+};
+
+/**
+ * Bandwidth for ATSC3.
+ */
+@export
+enum FrontendAtsc3Bandwidth : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set bandwidth automatically */
+ AUTO = 1 << 0,
+ BANDWIDTH_6MHZ = 1 << 1,
+ BANDWIDTH_7MHZ = 1 << 2,
+ BANDWIDTH_8MHZ = 1 << 3,
+};
+
+/**
+ * Time Interleave Mode for ATSC3.
+ */
+@export
+enum FrontendAtsc3TimeInterleaveMode : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set TimeInterleaveMode automatically */
+ AUTO = 1 << 0,
+ CTI = 1 << 1,
+ HTI = 1 << 2,
+};
+
+/**
+ * Code Rate for ATSC3.
+ */
+@export
+enum FrontendAtsc3CodeRate : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Coderate automatically */
+ AUTO = 1 << 0,
+ CODERATE_2_15 = 1 << 1,
+ CODERATE_3_15 = 1 << 2,
+ CODERATE_4_15 = 1 << 3,
+ CODERATE_5_15 = 1 << 4,
+ CODERATE_6_15 = 1 << 5,
+ CODERATE_7_15 = 1 << 6,
+ CODERATE_8_15 = 1 << 7,
+ CODERATE_9_15 = 1 << 8,
+ CODERATE_10_15 = 1 << 9,
+ CODERATE_11_15 = 1 << 10,
+ CODERATE_12_15 = 1 << 11,
+ CODERATE_13_15 = 1 << 12,
+};
+
+/**
+ * Forward Error Correction (FEC) for ATSC3.
+ */
+@export
+enum FrontendAtsc3Fec : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set FEC automatically */
+ AUTO = 1 << 0,
+ BCH_LDPC_16K = 1 << 1,
+ BCH_LDPC_64K = 1 << 2,
+ CRC_LDPC_16K = 1 << 3,
+ CRC_LDPC_64K = 1 << 4,
+ LDPC_16K = 1 << 5,
+ LDPC_64K = 1 << 6,
+};
+
+/**
+ * Demodulator Output Format for an ATSC3 Frontend.
+ */
+@export
+enum FrontendAtsc3DemodOutputFormat : uint8_t {
+ /** Dummy. Scan uses this. */
+ UNDEFINED = 0,
+ /** ALP format. Typically used in US region. */
+ ATSC3_LINKLAYER_PACKET = 1 << 0,
+ /** BaseBand packet format. Typically used in Korea region. */
+ BASEBAND_PACKET = 1 << 1,
+};
+
+/**
+ * PLP basis Signal Settings for an ATSC3 Frontend.
+ */
+struct FrontendAtsc3PlpSettings {
+ uint8_t plpId;
+ FrontendAtsc3Modulation modulation;
+ FrontendAtsc3TimeInterleaveMode interleaveMode;
+ FrontendAtsc3CodeRate codeRate;
+ FrontendAtsc3Fec fec;
+};
+
+/**
+ * Signal Settings for an ATSC3 Frontend.
+ */
+struct FrontendAtsc3Settings {
+ /** Signal frequency in Hertz */
+ uint32_t frequency;
+ /** Bandwidth of tuning band. */
+ FrontendAtsc3Bandwidth bandwidth;
+ FrontendAtsc3DemodOutputFormat demodOutputFormat;
+ vec<FrontendAtsc3PlpSettings> plpSettings;
+};
+
+/**
+ * Capabilities for ATSC3 Frontend.
+ */
+struct FrontendAtsc3Capabilities {
+ /** Bandwidth capability */
+ bitfield<FrontendAtsc3Bandwidth> bandwidthCap;
+ /** Modulation capability */
+ bitfield<FrontendAtsc3Modulation> modulationCap;
+ /** TimeInterleaveMode capability */
+ bitfield<FrontendAtsc3TimeInterleaveMode> timeInterleaveModeCap;
+ /** CodeRate capability */
+ bitfield<FrontendAtsc3CodeRate> codeRateCap;
+ /** FEC capability */
+ bitfield<FrontendAtsc3Fec> fecCap;
+ /** Demodulator Output Format capability */
+ bitfield<FrontendAtsc3DemodOutputFormat> demodOutputFormatCap;
+};
+
+/**
+ * Modulation Type for DVBS.
+ */
+@export
+enum FrontendDvbsModulation : int32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Modulation automatically */
+ AUTO = 1 << 0,
+ MOD_QPSK = 1 << 1,
+ MOD_8PSK = 1 << 2,
+ MOD_16QAM = 1 << 3,
+ MOD_16PSK = 1 << 4,
+ MOD_32PSK = 1 << 5,
+ MOD_ACM = 1 << 6,
+ MOD_8APSK = 1 << 7,
+ MOD_16APSK = 1 << 8,
+ MOD_32APSK = 1 << 9,
+ MOD_64APSK = 1 << 10,
+ MOD_128APSK = 1 << 11,
+ MOD_256APSK = 1 << 12,
+ /** Reserved for Proprietary modulation */
+ MOD_RESERVED = 1 << 13,
+};
+
+/**
+ * Roll Off value for DVBS.
+ */
+@export
+enum FrontendDvbsRolloff : uint32_t {
+ UNDEFINED,
+ ROLLOFF_0_35,
+ ROLLOFF_0_25,
+ ROLLOFF_0_20,
+ ROLLOFF_0_15,
+ ROLLOFF_0_10,
+ ROLLOFF_0_5,
+};
+
+/**
+ * Pilot mode for DVBS.
+ */
+@export
+enum FrontendDvbsPilot : uint32_t {
+ UNDEFINED,
+ ON,
+ OFF,
+ AUTO,
+};
+
+/**
+ * Code Rate for DVBS.
+ */
+struct FrontendDvbsCodeRate {
+ FrontendInnerFec fec;
+ bool isLinear;
+    /* true if short frames are enabled */
+    bool isShortFrames;
+    /* Number of bits per 1000 symbols. 0 to use the default. */
+ uint32_t bitsPer1000Symbol;
+};
+
+/**
+ * Sub standards in DVBS.
+ */
+@export
+enum FrontendDvbsStandard : uint8_t {
+ AUTO = 1 << 0,
+ S = 1 << 1,
+ S2 = 1 << 2,
+ S2X = 1 << 3,
+};
+
+/**
+ * Signal Settings for an DVBS Frontend.
+ */
+struct FrontendDvbsSettings {
+ /** Signal frequency in Hertz */
+ uint32_t frequency;
+ FrontendDvbsModulation modulation;
+ FrontendDvbsCodeRate coderate;
+ /** Symbols per second */
+ uint32_t symbolRate;
+ FrontendDvbsRolloff rolloff;
+ FrontendDvbsPilot pilot;
+ uint32_t inputStreamId;
+ FrontendDvbsStandard standard;
+};
+
+/**
+ * Capabilities for DVBS Frontend.
+ */
+struct FrontendDvbsCapabilities {
+ bitfield<FrontendDvbsModulation> modulationCap;
+ bitfield<FrontendInnerFec> innerfecCap;
+ bitfield<FrontendDvbsStandard> standard;
+};
+
+/**
+ * Modulation Type for DVBC.
+ */
+@export
+enum FrontendDvbcModulation : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Modulation automatically */
+ AUTO = 1 << 0,
+ MOD_16QAM = 1 << 1,
+ MOD_32QAM = 1 << 2,
+ MOD_64QAM = 1 << 3,
+ MOD_128QAM = 1 << 4,
+ MOD_256QAM = 1 << 5,
+};
+
+/**
+ * Outer Forward Error Correction (FEC) Type for DVBC.
+ */
+@export
+enum FrontendDvbcOuterFec : uint32_t {
+ UNDEFINED = 0,
+ OUTER_FEC_NONE,
+ OUTER_FEC_RS,
+};
+
+/**
+ * Annex Type for DVBC.
+ */
+@export
+enum FrontendDvbcAnnex : uint8_t {
+ UNDEFINED = 0,
+ A = 1 << 0,
+ B = 1 << 1,
+ C = 1 << 2,
+};
+
+/**
+ * Spectral Inversion Type for DVBC.
+ */
+@export
+enum FrontendDvbcSpectralInversion : uint32_t {
+ UNDEFINED,
+ NORMAL,
+ INVERTED,
+};
+
+/**
+ * Signal Settings for an DVBC Frontend.
+ */
+struct FrontendDvbcSettings {
+ /** Signal frequency in Hertz */
+ uint32_t frequency;
+ FrontendDvbcModulation modulation;
+ FrontendInnerFec fec;
+ /** Symbols per second */
+ uint32_t symbolRate;
+ FrontendDvbcOuterFec outerFec;
+ FrontendDvbcAnnex annex;
+ FrontendDvbcSpectralInversion spectralInversion;
+};
+
+/**
+ * Capabilities for DVBC Frontend.
+ */
+struct FrontendDvbcCapabilities {
+ bitfield<FrontendDvbcModulation> modulationCap;
+ bitfield<FrontendInnerFec> fecCap;
+ bitfield<FrontendDvbcAnnex> annexCap;
+};
+
+/**
+ * Bandwidth Type for DVBT.
+ */
+@export
+enum FrontendDvbtBandwidth : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Bandwidth automatically */
+ AUTO = 1 << 0,
+ BANDWIDTH_8MHZ = 1 << 1,
+ BANDWIDTH_7MHZ = 1 << 2,
+ BANDWIDTH_6MHZ = 1 << 3,
+ BANDWIDTH_5MHZ = 1 << 4,
+ BANDWIDTH_1_7MHZ = 1 << 5,
+ BANDWIDTH_10MHZ = 1 << 6,
+};
+
+/**
+ * Constellation Type for DVBT.
+ */
+@export
+enum FrontendDvbtConstellation : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Constellation automatically */
+ AUTO = 1 << 0,
+ CONSTELLATION_QPSK = 1 << 1,
+ CONSTELLATION_16QAM = 1 << 2,
+ CONSTELLATION_64QAM = 1 << 3,
+ CONSTELLATION_256QAM = 1 << 4,
+};
+
+/**
+ * Hierarchy Type for DVBT.
+ */
+@export
+enum FrontendDvbtHierarchy : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Hierarchy automatically */
+ AUTO = 1 << 0,
+ HIERARCHY_NON_NATIVE = 1 << 1,
+ HIERARCHY_1_NATIVE = 1 << 2,
+ HIERARCHY_2_NATIVE = 1 << 3,
+ HIERARCHY_4_NATIVE = 1 << 4,
+ HIERARCHY_NON_INDEPTH = 1 << 5,
+ HIERARCHY_1_INDEPTH = 1 << 6,
+ HIERARCHY_2_INDEPTH = 1 << 7,
+ HIERARCHY_4_INDEPTH = 1 << 8,
+};
+
+/**
+ * Code Rate for DVBT.
+ */
+@export
+enum FrontendDvbtCoderate : uint32_t {
+ UNDEFINED = 0,
+    /** hardware is able to detect and set Code Rate automatically */
+ AUTO = 1 << 0,
+ CODERATE_1_2 = 1 << 1,
+ CODERATE_2_3 = 1 << 2,
+ CODERATE_3_4 = 1 << 3,
+ CODERATE_5_6 = 1 << 4,
+ CODERATE_7_8 = 1 << 5,
+ CODERATE_3_5 = 1 << 6,
+ CODERATE_4_5 = 1 << 7,
+ CODERATE_6_7 = 1 << 8,
+ CODERATE_8_9 = 1 << 9,
+};
+
+/**
+ * Guard Interval Type for DVBT.
+ */
+@export
+enum FrontendDvbtGuardInterval : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Guard Interval automatically */
+ AUTO = 1 << 0,
+ INTERVAL_1_32 = 1 << 1,
+ INTERVAL_1_16 = 1 << 2,
+ INTERVAL_1_8 = 1 << 3,
+ INTERVAL_1_4 = 1 << 4,
+ INTERVAL_1_128 = 1 << 5,
+ INTERVAL_19_128 = 1 << 6,
+ INTERVAL_19_256 = 1 << 7,
+};
+
+/**
+ * Transmission Mode for DVBT.
+ */
+@export
+enum FrontendDvbtTransmissionMode : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Transmission Mode automatically */
+ AUTO = 1 << 0,
+ MODE_2K = 1 << 1,
+ MODE_8K = 1 << 2,
+ MODE_4K = 1 << 3,
+ MODE_1K = 1 << 4,
+ MODE_16K = 1 << 5,
+ MODE_32K = 1 << 6,
+};
+
+/**
+ * Physical Layer Pipe (PLP) Mode for DVBT.
+ */
+enum FrontendDvbtPlpMode : uint32_t {
+ UNDEFINED,
+ AUTO,
+ MANUAL,
+};
+
+/**
+ * Sub standards in DVBT.
+ */
+@export
+enum FrontendDvbtStandard : uint8_t {
+ AUTO = 1 << 0,
+ T = 1 << 1,
+ T2 = 1 << 2,
+};
+
+/**
* Signal Setting for DVBT Frontend.
*/
struct FrontendDvbtSettings {
/** Signal frequency in Hertz */
uint32_t frequency;
- FrontendAtscModulation modulation;
- FrontendInnerFec fec;
+ FrontendDvbtTransmissionMode transmissionMode;
+ FrontendDvbtBandwidth bandwidth;
+ FrontendDvbtConstellation constellation;
+ FrontendDvbtHierarchy hierarchy;
+ /** Code Rate for High Priority level */
+ FrontendDvbtCoderate hpCoderate;
+ /** Code Rate for Low Priority level */
+ FrontendDvbtCoderate lpCoderate;
+ FrontendDvbtGuardInterval guardInterval;
+ bool isHighPriority;
+ FrontendDvbtStandard standard;
+ bool isMiso;
+ FrontendDvbtPlpMode plpMode;
+ /** Physical Layer Pipe (PLP) Id */
+ uint8_t plpId;
+ /** Group Id for Physical Layer Pipe (PLP) */
+ uint8_t plpGroupId;
};
/**
- * Modulation Type for ATSC.
+ * Capabilities for DVBT Frontend.
+ */
+struct FrontendDvbtCapabilities {
+ bitfield<FrontendDvbtTransmissionMode> transmissionModeCap;
+ bitfield<FrontendDvbtBandwidth> bandwidthCap;
+ bitfield<FrontendDvbtConstellation> constellationCap;
+ bitfield<FrontendDvbtCoderate> coderateCap;
+ bitfield<FrontendDvbtHierarchy> hierarchyCap;
+ bitfield<FrontendDvbtGuardInterval> guardIntervalCap;
+ bool isT2Supported;
+ bool isMisoSupported;
+};
+
+/**
+ * Roll Off Type for ISDBS.
+ */
+@export
+enum FrontendIsdbsRolloff : uint32_t {
+ UNDEFINED,
+ ROLLOFF_0_35,
+};
+
+/**
+ * Modulation Type for ISDBS.
+ */
+@export
+enum FrontendIsdbsModulation : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Modulation automatically */
+ AUTO = 1 << 0,
+ MOD_BPSK = 1 << 1,
+ MOD_QPSK = 1 << 2,
+ MOD_TC8PSK = 1 << 3,
+};
+
+/**
+ * Code Rate Type for ISDBS.
+ */
+@export
+enum FrontendIsdbsCoderate : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Code Rate automatically */
+ AUTO = 1 << 0,
+ CODERATE_1_2 = 1 << 1,
+ CODERATE_2_3 = 1 << 2,
+ CODERATE_3_4 = 1 << 3,
+ CODERATE_5_6 = 1 << 4,
+ CODERATE_7_8 = 1 << 5,
+};
+
+/**
+ * Stream Id Type for ISDBS.
+ */
+@export
+enum FrontendIsdbsStreamIdType : uint32_t {
+ STREAM_ID,
+ RELATIVE_STREAM_NUMBER,
+};
+
+/**
+ * Signal Setting for ISDBS Frontend.
+ */
+struct FrontendIsdbsSettings {
+ /** Signal frequency in Hertz */
+ uint32_t frequency;
+ uint16_t streamId;
+ FrontendIsdbsStreamIdType streamIdType;
+ FrontendIsdbsModulation modulation;
+ FrontendIsdbsCoderate coderate;
+ /** Symbols per second */
+ uint32_t symbolRate;
+ FrontendIsdbsRolloff rolloff;
+};
+
+/**
+ * Capabilities for ISDBS Frontend.
+ */
+struct FrontendIsdbsCapabilities {
+ bitfield<FrontendIsdbsModulation> modulationCap;
+ bitfield<FrontendIsdbsCoderate> coderateCap;
+};
+
+/**
+ * Roll Off Type for ISDBS3.
+ */
+@export
+enum FrontendIsdbs3Rolloff : uint32_t {
+ UNDEFINED,
+ ROLLOFF_0_03,
+};
+
+/**
+ * Modulation Type for ISDBS3.
+ */
+@export
+enum FrontendIsdbs3Modulation : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Modulation automatically */
+    AUTO = 1 << 0,
+ MOD_BPSK = 1 << 1,
+ MOD_QPSK = 1 << 2,
+ MOD_8PSK = 1 << 3,
+ MOD_16APSK = 1 << 4,
+ MOD_32APSK = 1 << 5,
+};
+
+/**
+ * Code Rate Type for ISDBS3.
+ */
+@export
+enum FrontendIsdbs3Coderate : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Code Rate automatically */
+ AUTO = 1 << 0,
+ CODERATE_1_3 = 1 << 1,
+ CODERATE_2_5 = 1 << 2,
+ CODERATE_1_2 = 1 << 3,
+ CODERATE_3_5 = 1 << 4,
+ CODERATE_2_3 = 1 << 5,
+ CODERATE_3_4 = 1 << 6,
+ CODERATE_7_9 = 1 << 7,
+ CODERATE_4_5 = 1 << 8,
+ CODERATE_5_6 = 1 << 9,
+ CODERATE_7_8 = 1 << 10,
+ CODERATE_9_10 = 1 << 11,
+};
+
+/**
+ * Signal Setting for ISDBS3 Frontend.
+ */
+struct FrontendIsdbs3Settings {
+ /** Signal frequency in Hertz */
+ uint32_t frequency;
+ uint16_t streamId;
+ FrontendIsdbsStreamIdType streamIdType;
+ FrontendIsdbs3Modulation modulation;
+ FrontendIsdbs3Coderate coderate;
+ /** Symbols per second */
+ uint32_t symbolRate;
+ FrontendIsdbs3Rolloff rolloff;
+};
+
+/**
+ * Capabilities for ISDBS3 Frontend.
+ */
+struct FrontendIsdbs3Capabilities {
+ bitfield<FrontendIsdbs3Modulation> modulationCap;
+ bitfield<FrontendIsdbs3Coderate> coderateCap;
+};
+
+/**
+ * Mode for ISDBT.
+ */
+@export
+enum FrontendIsdbtMode : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Mode automatically */
+ AUTO = 1 << 0,
+ MODE_1 = 1 << 1,
+ MODE_2 = 1 << 2,
+ MODE_3 = 1 << 3,
+};
+
+/**
+ * Bandwidth for ISDBT.
+ */
+@export
+enum FrontendIsdbtBandwidth : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Bandwidth automatically */
+ AUTO = 1 << 0,
+ BANDWIDTH_8MHZ = 1 << 1,
+ BANDWIDTH_7MHZ = 1 << 2,
+ BANDWIDTH_6MHZ = 1 << 3,
+};
+
+/**
+ * Modulation for ISDBT.
+ */
+@export
+enum FrontendIsdbtModulation : uint32_t {
+ UNDEFINED = 0,
+ /** hardware is able to detect and set Modulation automatically */
+ AUTO = 1 << 0,
+ MOD_DQPSK = 1 << 1,
+ MOD_QPSK = 1 << 2,
+ MOD_16QAM = 1 << 3,
+ MOD_64QAM = 1 << 4,
+};
+
+/** Code Rate for ISDBT. */
+typedef FrontendDvbtCoderate FrontendIsdbtCoderate;
+
+/** Guard Interval for ISDBT. */
+typedef FrontendDvbtGuardInterval FrontendIsdbtGuardInterval;
+
+/**
+ * Signal Setting for ISDBT Frontend.
+ */
+struct FrontendIsdbtSettings {
+ /** Signal frequency in Hertz */
+ uint32_t frequency;
+ FrontendIsdbtModulation modulation;
+ FrontendIsdbtBandwidth bandwidth;
+ FrontendIsdbtMode mode;
+ FrontendIsdbtCoderate coderate;
+ FrontendIsdbtGuardInterval guardInterval;
+ uint32_t serviceAreaId;
+};
+
+/**
+ * Capabilities for ISDBT Frontend.
+ */
+struct FrontendIsdbtCapabilities {
+ bitfield<FrontendIsdbtMode> modeCap;
+ bitfield<FrontendIsdbtBandwidth> bandwidthCap;
+ bitfield<FrontendIsdbtModulation> constellationCap;
+ bitfield<FrontendIsdbtCoderate> coderateCap;
+ bitfield<FrontendIsdbtGuardInterval> guardIntervalCap;
+};
+
+/**
+ * Signal Type for Analog Frontend.
+ */
+@export
+enum FrontendAnalogType : uint32_t {
+ UNDEFINED = 0,
+ PAL = 1 << 0,
+ SECAM = 1 << 1,
+ NTSC = 1 << 2,
+};
+
+/**
+ * Standard Interchange Format (SIF) for Analog Frontend.
+ */
+@export
+enum FrontendAnalogSifStandard : uint32_t {
+ UNDEFINED = 0,
+ BG = 1 << 0,
+ BG_A2 = 1 << 1,
+ BG_NICAM = 1 << 2,
+ I = 1 << 3,
+ DK = 1 << 4,
+ DK1 = 1 << 5,
+ DK2 = 1 << 6,
+ DK3 = 1 << 7,
+ DK_NICAM = 1 << 8,
+ L = 1 << 9,
+ M = 1 << 10,
+ M_BTSC = 1 << 11,
+ M_A2 = 1 << 12,
+ M_EIA_J = 1 << 13,
+ I_NICAM = 1 << 14,
+ L_NICAM = 1 << 15,
+ L_PRIME = 1 << 16,
+};
+
+/**
+ * Signal Setting for Analog Frontend.
+ */
+struct FrontendAnalogSettings {
+ /** Signal frequency in Hertz */
+ uint32_t frequency;
+ FrontendAnalogType type;
+ FrontendAnalogSifStandard sifStandard;
+};
+
+/**
+ * Capabilities for Analog Frontend.
+ */
+struct FrontendAnalogCapabilities {
+ bitfield<FrontendAnalogType> typeCap;
+ bitfield<FrontendAnalogSifStandard> sifStandardCap;
+};
+
+/**
+ * Signal Setting for Frontend.
*/
safe_union FrontendSettings {
+ FrontendAnalogSettings analog;
FrontendAtscSettings atsc;
+ FrontendAtsc3Settings atsc3;
+ FrontendDvbsSettings dvbs;
+ FrontendDvbcSettings dvbc;
FrontendDvbtSettings dvbt;
+ FrontendIsdbsSettings isdbs;
+ FrontendIsdbs3Settings isdbs3;
+ FrontendIsdbtSettings isdbt;
+};
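+
+/*
+ * Illustrative client-side sketch (assumes the HIDL-generated C++ proxy; the
+ * local variable names are hypothetical). A caller populates exactly one
+ * member of the safe_union before tuning:
+ *
+ *     FrontendDvbtSettings dvbt = {.frequency = 474000000};
+ *     FrontendSettings settings;
+ *     settings.dvbt(dvbt);   // field-named setter selects the active member
+ *     frontend->tune(settings);
+ */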
+
+/**
+ * Scan type for Frontend.
+ */
+enum FrontendScanType : uint32_t {
+ SCAN_UNDEFINED = 0,
+ SCAN_AUTO = 1 << 0,
+ SCAN_BLIND = 1 << 1,
+};
+
+/**
+ * Scan Message Type for Frontend.
+ */
+enum FrontendScanMessageType : uint32_t {
+ /** Scan locked the signal. */
+ LOCKED,
+ /** Scan stopped. */
+ END,
+ /** Scan progress report. */
+ PROGRESS_PERCENT,
+ /** Locked frequency report. */
+ FREQUENCY,
+ /** Locked symbol rate. */
+ SYMBOL_RATE,
+ /** Locked Plp Ids for DVBT2 frontend. */
+ PLP_IDS,
+ /** Locked group Ids for DVBT2 frontend. */
+ GROUP_IDS,
+ /** Stream Ids. */
+ INPUT_STREAM_IDS,
+ /** Locked signal standard. */
+ STANDARD,
+ /** PLP status in a tuned frequency band for ATSC3 frontend. */
+ ATSC3_PLP_INFO,
+};
+
+/**
+ * ATSC3.0 PLP information for scan
+ */
+struct FrontendScanAtsc3PlpInfo {
+ uint8_t plpId;
+ bool bLlsFlag;
+};
+
+/**
+ * Scan Message for Frontend.
+ */
+safe_union FrontendScanMessage {
+ bool isLocked;
+ bool isEnd;
+ /** scan progress percent (0..100) */
+ uint8_t progressPercent;
+ /** Signal frequency in Hertz */
+ uint32_t frequency;
+ /** Symbols per second */
+ uint32_t symbolRate;
+ vec<uint8_t> plpIds;
+ vec<uint8_t> groupIds;
+ vec<uint16_t> inputStreamIds;
+ safe_union standard {
+ FrontendDvbsStandard sStd;
+ FrontendDvbtStandard tStd;
+ } std;
+ /** A list of PLP status in a tuned frequency band for ATSC3 frontend. */
+ vec<FrontendScanAtsc3PlpInfo> atsc3PlpInfos;
};
/**
@@ -122,20 +994,245 @@
@export
enum FrontendEventType : uint32_t {
/**
- * If frontend locked the signal which is specified by tune method, HAL sent
+     * If the frontend locked the signal specified by the tune method, HAL sends the
* Locked event.
*/
LOCKED,
/**
     * If the frontend can't lock the signal specified by the tune method,
- * HAL sent NO_SIGNAL event.
+ * HAL sends NO_SIGNAL event.
*/
NO_SIGNAL,
/**
- * If frontend detect that the locked signal get lost, HAL sent LOST_LOCK
+     * If the frontend detects that the locked signal is lost, HAL sends the LOST_LOCK
* event.
*/
LOST_LOCK,
+    /**
+     * If the frontend detects that the incoming Diseqc message has overflowed.
+     */
+    DISEQC_RX_OVERFLOW,
+    /**
+     * If the frontend detects that the outgoing Diseqc message isn't delivered on time.
+     */
+    DISEQC_RX_TIMEOUT,
+    /**
+     * If the frontend detects that the incoming Diseqc message has a parity error.
+     */
+    DISEQC_RX_PARITY_ERROR,
+    /**
+     * If the frontend detects that the LNB is overloaded.
+     */
+    LNB_OVERLOAD,
+};
+
+/**
+ * Frontend Status Type.
+ */
+@export
+enum FrontendStatusType : uint32_t {
+ /** Lock status for Demod. */
+ DEMOD_LOCK,
+ /** Signal to Noise Ratio. */
+ SNR,
+ /** Bit Error Ratio. */
+ BER,
+    /** Packet Error Ratio. */
+ PER,
+ /** Bit Error Ratio before FEC. */
+ PRE_BER,
+ /*
+ * Signal Quality (0..100). Good data over total data in percent can be
+ * used as a way to present Signal Quality.
+ */
+ SIGNAL_QUALITY,
+ /** Signal Strength. */
+ SIGNAL_STRENGTH,
+ /** Symbol Rate. */
+ SYMBOL_RATE,
+ /** Forward Error Correction Type. */
+ FEC,
+ /** Modulation Type. */
+ MODULATION,
+ /** Spectral Inversion Type. */
+ SPECTRAL,
+ /** LNB Voltage. */
+ LNB_VOLTAGE,
+ /** Physical Layer Pipe ID. */
+ PLP_ID,
+ /** Status for Emergency Warning Broadcasting System. */
+ EWBS,
+ /** Automatic Gain Control. */
+ AGC,
+ /** Low Noise Amplifier. */
+ LNA,
+ /** Lock status for stream. */
+ STREAM_LOCK,
+ /** Error status by layer. */
+ LAYER_ERROR,
+ /** CN value by VBER. */
+ VBER_CN,
+ /** CN value by LBER. */
+ LBER_CN,
+ /** CN value by XER. */
+ XER_CN,
+    /** Modulation Error Ratio. */
+ MER,
+ /** Difference between tuning frequency and actual locked frequency. */
+ FREQ_OFFSET,
+    /** Hierarchy for DVBT. */
+ HIERARCHY,
+ /** Lock status for RF. */
+ RF_LOCK,
+ /** PLP information in a frequency band for ATSC3.0 frontend. */
+ ATSC3_PLP_INFO,
+};
+
+/**
+ * Status for each tuning PLPs
+ */
+struct FrontendStatusAtsc3PlpInfo {
+ /** PLP Id value. */
+ uint8_t plpId;
+ /** Demod Lock/Unlock status of this particular PLP. */
+ bool isLocked;
+ /** Uncorrectable Error Counts (UEC) of this particular PLP since last tune operation. */
+ uint32_t uec;
+};
+
+
+/**
+ * Modulation Type for Frontend's status.
+ */
+safe_union FrontendModulationStatus {
+ FrontendDvbcModulation dvbc;
+ FrontendDvbsModulation dvbs;
+ FrontendIsdbsModulation isdbs;
+ FrontendIsdbs3Modulation isdbs3;
+ FrontendIsdbtModulation isdbt;
+};
+
+/**
+ * The status for Frontend.
+ */
+safe_union FrontendStatus {
+ /** Lock status for Demod in True/False. */
+ bool isDemodLocked;
+ /** SNR value measured by 0.001 dB. */
+ int32_t snr;
+    /** The number of error bits per 1 billion bits. */
+    uint32_t ber;
+    /** The number of error packets per 1 billion packets. */
+    uint32_t per;
+    /** The number of error bits per 1 billion bits before FEC. */
+ uint32_t preBer;
+ /** Signal Quality in percent. */
+ uint32_t signalQuality;
+ /** Signal Strength measured by 0.001 dBm. */
+ int32_t signalStrength;
+ /** Symbols per second */
+ uint32_t symbolRate;
+ FrontendInnerFec innerFec;
+ FrontendModulationStatus modulation;
+ FrontendDvbcSpectralInversion inversion;
+ FrontendLnbVoltage lnbVoltage;
+ uint8_t plpId;
+ bool isEWBS;
+ /** AGC value is normalized from 0 to 255. */
+ uint8_t agc;
+ bool isLnaOn;
+ bool isStreamLock;
+ vec<bool> isLayerError;
+ /** CN value by VBER measured by 0.001 dB */
+ int32_t vberCn;
+ /** CN value by LBER measured by 0.001 dB */
+ int32_t lberCn;
+ /** CN value by XER measured by 0.001 dB */
+ int32_t xerCn;
+ /** MER value measured by 0.001 dB */
+ int32_t mer;
+ /** Frequency difference in Hertz. */
+ int32_t freqOffset;
+ FrontendDvbtHierarchy hierarchy;
+ bool isRfLocked;
+ /** A list of PLP status for tuned PLPs for ATSC3 frontend. */
+ vec<FrontendStatusAtsc3PlpInfo> plpInfo;
+};
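+
+/*
+ * Illustrative note: fields documented as "measured by 0.001 dB" or
+ * "0.001 dBm" are fixed-point, e.g. an SNR of 25.5 dB is reported as
+ * snr = 25500 and a signal strength of -70 dBm as signalStrength = -70000.
+ */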
+
+/**
+ * Information for the Frontend.
+ */
+struct FrontendInfo {
+ FrontendType type;
+ /** Frequency in Hertz */
+ uint32_t minFrequency;
+ /** Frequency in Hertz */
+ uint32_t maxFrequency;
+ /** Minimum symbols per second */
+ uint32_t minSymbolRate;
+ /** Maximum symbols per second */
+ uint32_t maxSymbolRate;
+ /** Range in Hertz */
+ uint32_t acquireRange;
+ /*
+     * Frontends are assigned the same exclusiveGroupId if they can't
+     * function at the same time, e.g. when they share the same hardware module.
+ */
+ uint32_t exclusiveGroupId;
+    /** A list of supported status types which the client can query. */
+ vec<FrontendStatusType> statusCaps;
+ safe_union FrontendCapabilities {
+ FrontendAnalogCapabilities analogCaps;
+ FrontendAtscCapabilities atscCaps;
+ FrontendAtsc3Capabilities atsc3Caps;
+ FrontendDvbsCapabilities dvbsCaps;
+ FrontendDvbcCapabilities dvbcCaps;
+ FrontendDvbtCapabilities dvbtCaps;
+ FrontendIsdbsCapabilities isdbsCaps;
+ FrontendIsdbs3Capabilities isdbs3Caps;
+ FrontendIsdbtCapabilities isdbtCaps;
+ } frontendCaps;
+};
+
+/*
+ * Low-Noise Block downconverter (LNB) ID is used to associate with a hardware
+ * LNB module.
+ */
+typedef uint32_t LnbId;
+
+/**
+ * Power Voltage Type for LNB.
+ */
+@export
+enum FrontendLnbVoltage : uint32_t {
+ NONE,
+ VOLTAGE_5V,
+ VOLTAGE_11V,
+ VOLTAGE_12V,
+ VOLTAGE_13V,
+ VOLTAGE_14V,
+ VOLTAGE_15V,
+ VOLTAGE_18V,
+ VOLTAGE_19V,
+};
+
+/**
+ * Tone Type for LNB.
+ */
+@export
+enum FrontendLnbTone : int32_t {
+ NONE,
+ CONTINUOUS,
+};
+
+/**
+ * The Position of LNB.
+ */
+@export
+enum FrontendLnbPosition : int32_t {
+ UNDEFINED,
+ POSITION_A,
+ POSITION_B,
};
/* Demux ID is used to associate with a hardware demux resource. */
@@ -166,7 +1263,7 @@
*/
AUDIO,
/**
- * A filter to filter Vidoe Metadata out from input stream.
+ * A filter to filter Video Metadata out from input stream.
*/
VIDEO,
/**
@@ -248,7 +1345,7 @@
/* Version number for Section Filter */
uint16_t version;
/* true if the filter checks CRC and discards data with wrong CRC */
- bool checkCrc;
+ bool isCheckCrc;
/* true if the filter repeats the data with the same version */
bool isRepeat;
/* true if the filter output raw data */
@@ -265,7 +1362,7 @@
DemuxTpid tpid;
DemuxStreamId streamId;
/* true if the filter output raw data */
- bool bIsRaw;
+ bool isRaw;
};
/**
@@ -283,7 +1380,7 @@
/**
* true if the filter output goes to decoder directly in pass through mode.
*/
- bool bPassthrough;
+ bool isPassthrough;
};
/**
@@ -294,7 +1391,7 @@
/**
* true if the filter output goes to decoder directly in pass through mode.
*/
- bool bPassthrough;
+ bool isPassthrough;
};
/**
@@ -479,3 +1576,132 @@
* framework and apps.
*/
typedef vec<uint8_t> TunerKeyToken;
+
+/**
+ * A data format in demux's output or input according to ISO/IEC 13818-1.
+ */
+@export
+enum DemuxDataFormat : uint32_t {
+ /* Data is Transport Stream. */
+ TS,
+ /* Data is Packetized Elementary Stream. */
+ PES,
+ /* Data is Elementary Stream. */
+ ES,
+ /* Data is TLV (type-length-value) Stream for JP SHV */
+ SHV_TLV,
+};
+
+/**
+ * A status of the demux's output.
+ */
+typedef DemuxFilterStatus DemuxOutputStatus;
+
+/**
+ * The Settings for the demux's output.
+ */
+struct DemuxOutputSettings {
+ /**
+     * Register the status events of interest so that the HAL can send these
+     * status events back to the client.
+ */
+ bitfield<DemuxOutputStatus> statusMask;
+ /**
+ * Unconsumed data size in bytes in the output. The HAL uses it to trigger
+ * DemuxOutputStatus::LOW_WATER.
+ */
+ uint32_t lowThreshold;
+ /**
+ * Unconsumed data size in bytes in the output. The HAL uses it to trigger
+     * DemuxOutputStatus::HIGH_WATER.
+ */
+ uint32_t highThreshold;
+ /**
+ * The data format in the output.
+ */
+ DemuxDataFormat dataFormat;
+ /**
+ * The packet size in bytes in the output.
+ */
+ uint8_t packetSize;
+};
+
+/**
+ * A status of the demux's input.
+ */
+@export
+enum DemuxInputStatus : uint32_t {
+ /**
+ * The space of the demux's input is empty.
+ */
+ SPACE_EMPTY = 1 << 0,
+ /**
+     * The space of the demux's input is almost empty.
+ */
+ SPACE_ALMOST_EMPTY = 1 << 1,
+ /**
+ * The space of the demux's input is almost full.
+ */
+ SPACE_ALMOST_FULL = 1 << 2,
+ /**
+ * The space of the demux's input is full.
+ */
+ SPACE_FULL = 1 << 3,
+};
+
+/**
+ * The Settings for the demux's input.
+ */
+@export
+struct DemuxInputSettings {
+ /**
+     * Register the status events of interest so that the HAL can send these
+     * status events back to the client.
+ */
+ bitfield<DemuxInputStatus> statusMask;
+ /**
+ * Unused space size in bytes in the input. The HAL uses it to trigger
+ * DemuxInputStatus::SPACE_ALMOST_EMPTY.
+ */
+ uint32_t lowThreshold;
+ /**
+ * Unused space size in bytes in the input. The HAL uses it to trigger
+ * DemuxInputStatus::SPACE_ALMOST_FULL.
+ */
+ uint32_t highThreshold;
+ /**
+ * The data format in the input.
+ */
+ DemuxDataFormat dataFormat;
+ /**
+ * The packet size in bytes in the input.
+ */
+ uint8_t packetSize;
+};
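
A minimal sketch (not part of this change) of the threshold-driven flow control these settings imply: the VTS callback later in this patch applies the same status-to-flag mapping to decide whether to keep writing into the input FMQ. The helper name is made up for illustration:

    #include <android/hardware/tv/tuner/1.0/types.h>

    using ::android::hardware::tv::tuner::V1_0::DemuxInputStatus;

    // EMPTY / ALMOST_EMPTY (below lowThreshold)  -> keep feeding the input FMQ;
    // ALMOST_FULL / FULL  (above highThreshold)  -> back off.
    bool shouldKeepWriting(DemuxInputStatus status, bool currentlyWriting) {
        switch (status) {
            case DemuxInputStatus::SPACE_EMPTY:
            case DemuxInputStatus::SPACE_ALMOST_EMPTY:
                return true;
            case DemuxInputStatus::SPACE_ALMOST_FULL:
            case DemuxInputStatus::SPACE_FULL:
                return false;
            default:
                return currentlyWriting;  // Unknown status: leave the behavior unchanged.
        }
    }
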
+
+/**
+ * Capabilities for Demux.
+ */
+@export
+struct DemuxCapabilities {
+ /* The number of Demux to be supported. */
+ uint32_t numDemux;
+ /* The number of Input to be supported. */
+ uint32_t numInput;
+ /* The number of Output to be supported. */
+ uint32_t numOutput;
+ /* The number of TS Filter to be supported. */
+ uint32_t numTsFilter;
+ /* The number of Section Filter to be supported. */
+ uint32_t numSectionFilter;
+ /* The number of Audio Filter to be supported. */
+ uint32_t numAudioFilter;
+ /* The number of Video Filter to be supported. */
+ uint32_t numVideoFilter;
+ /* The number of PES Filter to be supported. */
+ uint32_t numPesFilter;
+ /* The number of PCR Filter to be supported. */
+ uint32_t numPcrFilter;
+    /* The maximum number of bytes supported in the mask of a Section Filter. */
+ uint32_t numBytesInSectionFilter;
+};
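
As an illustration only (not part of this change), a client could use DemuxCapabilities as a simple pre-flight check before configuring filters; the helper below is hypothetical:

    #include <android/hardware/tv/tuner/1.0/types.h>

    #include <cstdint>

    using ::android::hardware::tv::tuner::V1_0::DemuxCapabilities;

    // Verify that a planned filter configuration fits within the reported capabilities.
    bool fitsCapabilities(const DemuxCapabilities& caps, uint32_t tsFilters,
                          uint32_t sectionFilters, uint32_t pesFilters,
                          uint32_t sectionMaskBytes) {
        return tsFilters <= caps.numTsFilter &&
               sectionFilters <= caps.numSectionFilter &&
               pesFilters <= caps.numPesFilter &&
               sectionMaskBytes <= caps.numBytesInSectionFilter;
    }
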
diff --git a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp
index 66adb2a..7936185 100644
--- a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp
+++ b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp
@@ -34,8 +34,12 @@
#include <hidlmemory/FrameworkUtils.h>
#include <utils/Condition.h>
#include <utils/Mutex.h>
+#include <fstream>
+#include <iostream>
+#include <map>
#define WAIT_TIMEOUT 3000000000
+#define WAIT_TIMEOUT_data_ready 3000000000 * 4
using android::Condition;
using android::IMemory;
@@ -53,11 +57,18 @@
using android::hardware::MQDescriptorSync;
using android::hardware::Return;
using android::hardware::Void;
+using android::hardware::tv::tuner::V1_0::DemuxDataFormat;
using android::hardware::tv::tuner::V1_0::DemuxFilterEvent;
+using android::hardware::tv::tuner::V1_0::DemuxFilterPesDataSettings;
using android::hardware::tv::tuner::V1_0::DemuxFilterPesEvent;
using android::hardware::tv::tuner::V1_0::DemuxFilterSectionEvent;
+using android::hardware::tv::tuner::V1_0::DemuxFilterSectionSettings;
+using android::hardware::tv::tuner::V1_0::DemuxFilterSettings;
using android::hardware::tv::tuner::V1_0::DemuxFilterStatus;
using android::hardware::tv::tuner::V1_0::DemuxFilterType;
+using android::hardware::tv::tuner::V1_0::DemuxInputSettings;
+using android::hardware::tv::tuner::V1_0::DemuxInputStatus;
+using android::hardware::tv::tuner::V1_0::DemuxOutputStatus;
using android::hardware::tv::tuner::V1_0::DemuxQueueNotifyBits;
using android::hardware::tv::tuner::V1_0::FrontendAtscModulation;
using android::hardware::tv::tuner::V1_0::FrontendAtscSettings;
@@ -65,6 +76,8 @@
using android::hardware::tv::tuner::V1_0::FrontendEventType;
using android::hardware::tv::tuner::V1_0::FrontendId;
using android::hardware::tv::tuner::V1_0::FrontendInnerFec;
+using android::hardware::tv::tuner::V1_0::FrontendScanMessage;
+using android::hardware::tv::tuner::V1_0::FrontendScanMessageType;
using android::hardware::tv::tuner::V1_0::FrontendSettings;
using android::hardware::tv::tuner::V1_0::IDemux;
using android::hardware::tv::tuner::V1_0::IDemuxCallback;
@@ -77,9 +90,9 @@
namespace {
using FilterMQ = MessageQueue<uint8_t, kSynchronizedReadWrite>;
-using FilterMQDesc = MQDescriptorSync<uint8_t>;
+using MQDesc = MQDescriptorSync<uint8_t>;
-const std::vector<uint8_t> goldenDataInputBuffer{
+const std::vector<uint8_t> goldenDataOutputBuffer{
0x00, 0x00, 0x00, 0x01, 0x09, 0xf0, 0x00, 0x00, 0x00, 0x01, 0x67, 0x42, 0xc0, 0x1e, 0xdb,
0x01, 0x40, 0x16, 0xec, 0x04, 0x40, 0x00, 0x00, 0x03, 0x00, 0x40, 0x00, 0x00, 0x0f, 0x03,
0xc5, 0x8b, 0xb8, 0x00, 0x00, 0x00, 0x01, 0x68, 0xca, 0x8c, 0xb2, 0x00, 0x00, 0x01, 0x06,
@@ -119,9 +132,17 @@
};
const uint16_t FMQ_SIZE_4K = 0x1000;
-// Equal to SECTION_WRITE_COUNT on the HAL impl side
-// The HAL impl will repeatedly write to the FMQ the count times
-const uint16_t SECTION_READ_COUNT = 10;
+const uint32_t FMQ_SIZE_1M = 0x100000;
+
+struct FilterConf {
+ DemuxFilterType type;
+ DemuxFilterSettings setting;
+};
+
+struct InputConf {
+ string inputDataFile;
+ DemuxInputSettings setting;
+};
class FrontendCallback : public IFrontendCallback {
public:
@@ -141,12 +162,21 @@
return Void();
}
+ virtual Return<void> onScanMessage(FrontendScanMessageType /* type */,
+ const FrontendScanMessage& /* message */) override {
+ android::Mutex::Autolock autoLock(mMsgLock);
+ mScanMessageReceived = true;
+ mMsgCondition.signal();
+ return Void();
+ };
+
void testOnEvent(sp<IFrontend>& frontend, FrontendSettings settings);
void testOnDiseqcMessage(sp<IFrontend>& frontend, FrontendSettings settings);
private:
bool mEventReceived = false;
bool mDiseqcMessageReceived = false;
+ bool mScanMessageReceived = false;
FrontendEventType mEventType;
hidl_vec<uint8_t> mEventMessage;
android::Mutex mMsgLock;
@@ -185,8 +215,14 @@
public:
virtual Return<void> onFilterEvent(const DemuxFilterEvent& filterEvent) override {
android::Mutex::Autolock autoLock(mMsgLock);
- mFilterEventReceived = true;
- mFilterEvent = filterEvent;
+        // Temporarily we treat the first filter data coming back on the matching pid as a
+        // success; once all of the MQs are cleared, it means we got all the expected output.
+ mFilterIdToEvent[filterEvent.filterId] = filterEvent;
+ readFilterEventData(filterEvent.filterId);
+ mPidFilterOutputCount++;
+ // mFilterIdToMQ.erase(filterEvent.filterId);
+
+ // startFilterEventThread(filterEvent);
mMsgCondition.signal();
return Void();
}
@@ -196,117 +232,235 @@
return Void();
}
+ virtual Return<void> onOutputStatus(DemuxOutputStatus /*status*/) override { return Void(); }
+
+ virtual Return<void> onInputStatus(DemuxInputStatus status) override {
+ // android::Mutex::Autolock autoLock(mMsgLock);
+ ALOGW("[vts] input status %d", status);
+ switch (status) {
+ case DemuxInputStatus::SPACE_EMPTY:
+ case DemuxInputStatus::SPACE_ALMOST_EMPTY:
+                ALOGW("[vts] keep inputting %d", status);
+ mKeepWritingInputFMQ = true;
+ break;
+ case DemuxInputStatus::SPACE_ALMOST_FULL:
+ case DemuxInputStatus::SPACE_FULL:
+                ALOGW("[vts] stop inputting %d", status);
+ mKeepWritingInputFMQ = false;
+ break;
+ }
+ return Void();
+ }
+
void testOnFilterEvent(uint32_t filterId);
- void testOnSectionFilterEvent(sp<IDemux>& demux, uint32_t filterId,
- FilterMQDesc& filterMQDescriptor);
- void testOnPesFilterEvent(sp<IDemux>& demux, uint32_t filterId,
- FilterMQDesc& filterMQDescriptor);
- void readAndCompareSectionEventData();
- void readAndComparePesEventData();
+ void testFilterDataOutput();
+ void stopInputThread();
+
+ void startPlaybackInputThread(InputConf inputConf, MQDesc& inputMQDescriptor);
+ void startFilterEventThread(DemuxFilterEvent event);
+ static void* __threadLoopInput(void* threadArgs);
+ static void* __threadLoopFilter(void* threadArgs);
+ void inputThreadLoop(InputConf* inputConf, bool* keepWritingInputFMQ);
+ void filterThreadLoop(DemuxFilterEvent& event);
+
+ void updateFilterMQ(uint32_t filterId, MQDesc& filterMQDescriptor);
+ void updateGoldenOutputMap(uint32_t filterId, string goldenOutputFile);
+ bool readFilterEventData(uint32_t filterId);
private:
- bool mFilterEventReceived = false;
- std::vector<uint8_t> mDataOutputBuffer;
- std::unique_ptr<FilterMQ> mFilterMQ;
+ struct InputThreadArgs {
+ DemuxCallback* user;
+ InputConf* inputConf;
+ bool* keepWritingInputFMQ;
+ };
+ struct FilterThreadArgs {
+ DemuxCallback* user;
+ DemuxFilterEvent event;
+ };
uint16_t mDataLength = 0;
- DemuxFilterEvent mFilterEvent;
+ std::vector<uint8_t> mDataOutputBuffer;
+
+ bool mFilterEventReceived;
+ std::map<uint32_t, string> mFilterIdToGoldenOutput;
+
+ std::map<uint32_t, std::unique_ptr<FilterMQ>> mFilterIdToMQ;
+ std::unique_ptr<FilterMQ> mInputMQ;
+ std::map<uint32_t, EventFlag*> mFilterIdToMQEventFlag;
+ std::map<uint32_t, DemuxFilterEvent> mFilterIdToEvent;
+ EventFlag* mInputMQEventFlag;
+
android::Mutex mMsgLock;
- android::Mutex mReadLock;
+ android::Mutex mFilterOutputLock;
+ android::Mutex mInputThreadLock;
android::Condition mMsgCondition;
- EventFlag* mFilterMQEventFlag;
+ android::Condition mFilterOutputCondition;
+
+ bool mKeepWritingInputFMQ = true;
+ bool mInputThreadRunning;
+ pthread_t mInputThread;
+ pthread_t mFilterThread;
+
+ int mPidFilterOutputCount = 0;
};
-void DemuxCallback::testOnFilterEvent(uint32_t filterId) {
+void DemuxCallback::startPlaybackInputThread(InputConf inputConf, MQDesc& inputMQDescriptor) {
+ mInputMQ = std::make_unique<FilterMQ>(inputMQDescriptor, true /* resetPointers */);
+ EXPECT_TRUE(mInputMQ);
+ struct InputThreadArgs* threadArgs =
+ (struct InputThreadArgs*)malloc(sizeof(struct InputThreadArgs));
+ threadArgs->user = this;
+ threadArgs->inputConf = &inputConf;
+ threadArgs->keepWritingInputFMQ = &mKeepWritingInputFMQ;
+
+ pthread_create(&mInputThread, NULL, __threadLoopInput, (void*)threadArgs);
+ pthread_setname_np(mInputThread, "test_playback_input_loop");
+}
+
+void DemuxCallback::startFilterEventThread(DemuxFilterEvent event) {
+ struct FilterThreadArgs* threadArgs =
+ (struct FilterThreadArgs*)malloc(sizeof(struct FilterThreadArgs));
+ threadArgs->user = this;
+ threadArgs->event = event;
+
+ pthread_create(&mFilterThread, NULL, __threadLoopFilter, (void*)threadArgs);
+    pthread_setname_np(mFilterThread, "test_playback_filter_loop");
+}
+
+void DemuxCallback::testFilterDataOutput() {
android::Mutex::Autolock autoLock(mMsgLock);
- while (!mFilterEventReceived) {
+ while (mPidFilterOutputCount < 1) {
if (-ETIMEDOUT == mMsgCondition.waitRelative(mMsgLock, WAIT_TIMEOUT)) {
- EXPECT_TRUE(false) << "filter event not received within timeout";
+            EXPECT_TRUE(false) << "filter output for the matching pid not received within timeout";
return;
}
}
- // Reset the filter event recieved flag
- mFilterEventReceived = false;
- // Check if filter id match
- EXPECT_TRUE(filterId == mFilterEvent.filterId) << "filter id match";
+ mPidFilterOutputCount = 0;
+ ALOGW("[vts] pass and stop");
}
-void DemuxCallback::testOnSectionFilterEvent(sp<IDemux>& demux, uint32_t filterId,
- FilterMQDesc& filterMQDescriptor) {
- Result status;
- // Create MQ to read the output into the local buffer
- mFilterMQ = std::make_unique<FilterMQ>(filterMQDescriptor, true /* resetPointers */);
- EXPECT_TRUE(mFilterMQ);
+void DemuxCallback::stopInputThread() {
+ mInputThreadRunning = false;
+ mKeepWritingInputFMQ = false;
+
+ android::Mutex::Autolock autoLock(mInputThreadLock);
+}
+
+void DemuxCallback::updateFilterMQ(uint32_t filterId, MQDesc& filterMQDescriptor) {
+ mFilterIdToMQ[filterId] =
+ std::make_unique<FilterMQ>(filterMQDescriptor, true /* resetPointers */);
+ EXPECT_TRUE(mFilterIdToMQ[filterId]);
+ EXPECT_TRUE(EventFlag::createEventFlag(mFilterIdToMQ[filterId]->getEventFlagWord(),
+ &mFilterIdToMQEventFlag[filterId]) == android::OK);
+}
+
+void DemuxCallback::updateGoldenOutputMap(uint32_t filterId, string goldenOutputFile) {
+ mFilterIdToGoldenOutput[filterId] = goldenOutputFile;
+}
+
+void* DemuxCallback::__threadLoopInput(void* threadArgs) {
+ DemuxCallback* const self =
+ static_cast<DemuxCallback*>(((struct InputThreadArgs*)threadArgs)->user);
+ self->inputThreadLoop(((struct InputThreadArgs*)threadArgs)->inputConf,
+ ((struct InputThreadArgs*)threadArgs)->keepWritingInputFMQ);
+ return 0;
+}
+
+void DemuxCallback::inputThreadLoop(InputConf* inputConf, bool* keepWritingInputFMQ) {
+ android::Mutex::Autolock autoLock(mInputThreadLock);
+ mInputThreadRunning = true;
+
// Create the EventFlag that is used to signal the HAL impl that data have been
- // read the Filter FMQ
- EXPECT_TRUE(EventFlag::createEventFlag(mFilterMQ->getEventFlagWord(), &mFilterMQEventFlag) ==
+ // written into the Input FMQ
+ EventFlag* inputMQEventFlag;
+ EXPECT_TRUE(EventFlag::createEventFlag(mInputMQ->getEventFlagWord(), &inputMQEventFlag) ==
android::OK);
- // Start filter
- status = demux->startFilter(filterId);
- EXPECT_EQ(status, Result::SUCCESS);
- // Test start filter and receive callback event
- for (int i = 0; i < SECTION_READ_COUNT; i++) {
- testOnFilterEvent(filterId);
- // checksum of mDataOutputBuffer and Input golden input
- readAndCompareSectionEventData();
+
+ // open the stream and get its length
+ std::ifstream inputData(inputConf->inputDataFile, std::ifstream::binary);
+ int writeSize = inputConf->setting.packetSize * 6;
+ char* buffer = new char[writeSize];
+ ALOGW("[vts] input thread loop start %s", inputConf->inputDataFile.c_str());
+ if (!inputData.is_open()) {
+ mInputThreadRunning = false;
+ ALOGW("[vts] Error %s", strerror(errno));
}
-}
-void DemuxCallback::testOnPesFilterEvent(sp<IDemux>& demux, uint32_t filterId,
- FilterMQDesc& filterMQDescriptor) {
- Result status;
- // Create MQ to read the output into the local buffer
- mFilterMQ = std::make_unique<FilterMQ>(filterMQDescriptor, true /* resetPointers */);
- EXPECT_TRUE(mFilterMQ);
- // Create the EventFlag that is used to signal the HAL impl that data have been
- // read the Filter FMQ
- EXPECT_TRUE(EventFlag::createEventFlag(mFilterMQ->getEventFlagWord(), &mFilterMQEventFlag) ==
- android::OK);
- // Start filter
- status = demux->startFilter(filterId);
- EXPECT_EQ(status, Result::SUCCESS);
- // Test start filter and receive callback event
- testOnFilterEvent(filterId);
- // checksum of mDataOutputBuffer and Input golden input
- readAndComparePesEventData();
-}
-
-void DemuxCallback::readAndCompareSectionEventData() {
- bool result = false;
- for (int i = 0; i < mFilterEvent.events.size(); i++) {
- DemuxFilterSectionEvent event = mFilterEvent.events[i].section();
- mDataLength = event.dataLength;
- EXPECT_TRUE(mDataLength == goldenDataInputBuffer.size()) << "buffer size does not match";
-
- mDataOutputBuffer.resize(mDataLength);
- result = mFilterMQ->read(mDataOutputBuffer.data(), mDataLength);
- EXPECT_TRUE(result) << "can't read from Filter MQ";
-
- for (int i = 0; i < mDataLength; i++) {
- EXPECT_TRUE(goldenDataInputBuffer[i] == mDataOutputBuffer[i]) << "data does not match";
+ while (mInputThreadRunning) {
+ // move the stream pointer for packet size * 6 every read until the end
+ while (*keepWritingInputFMQ) {
+ inputData.read(buffer, writeSize);
+ if (!inputData) {
+ int leftSize = inputData.gcount();
+ if (leftSize == 0) {
+ mInputThreadRunning = false;
+ break;
+ }
+ inputData.clear();
+ inputData.read(buffer, leftSize);
+                // Write the leftover input data and quit the thread
+ if (leftSize > 0) {
+ EXPECT_TRUE(mInputMQ->write((unsigned char*)&buffer[0], leftSize));
+ inputMQEventFlag->wake(static_cast<uint32_t>(DemuxQueueNotifyBits::DATA_READY));
+ }
+ mInputThreadRunning = false;
+ break;
+ }
+ // Write input FMQ and notify the Tuner Implementation
+ EXPECT_TRUE(mInputMQ->write((unsigned char*)&buffer[0], writeSize));
+ inputMQEventFlag->wake(static_cast<uint32_t>(DemuxQueueNotifyBits::DATA_READY));
+ inputData.seekg(writeSize, inputData.cur);
+ sleep(1);
}
}
- if (result) {
- mFilterMQEventFlag->wake(static_cast<uint32_t>(DemuxQueueNotifyBits::DATA_CONSUMED));
- }
+
+ ALOGW("[vts] Input thread end.");
+
+ delete[] buffer;
+ inputData.close();
}
-void DemuxCallback::readAndComparePesEventData() {
- // TODO handle multiple events in one filter callback event
- DemuxFilterPesEvent event = mFilterEvent.events[0].pes();
- mDataLength = event.dataLength;
- EXPECT_TRUE(mDataLength == goldenDataInputBuffer.size()) << "buffer size does not match";
+void* DemuxCallback::__threadLoopFilter(void* threadArgs) {
+ DemuxCallback* const self =
+ static_cast<DemuxCallback*>(((struct FilterThreadArgs*)threadArgs)->user);
+ self->filterThreadLoop(((struct FilterThreadArgs*)threadArgs)->event);
+ return 0;
+}
- mDataOutputBuffer.resize(mDataLength);
- bool result = mFilterMQ->read(mDataOutputBuffer.data(), mDataLength);
- EXPECT_TRUE(result) << "can't read from Filter MQ";
+void DemuxCallback::filterThreadLoop(DemuxFilterEvent& /* event */) {
+ android::Mutex::Autolock autoLock(mFilterOutputLock);
+ // Read from mFilterIdToMQ[event.filterId] per event and filter type
- if (result) {
- mFilterMQEventFlag->wake(static_cast<uint32_t>(DemuxQueueNotifyBits::DATA_CONSUMED));
+ // Assemble to filterOutput[filterId]
+
+ // check if filterOutput[filterId] matches goldenOutput[filterId]
+
+ // If match, remove filterId entry from MQ map
+
+ // end thread
+}
+
+bool DemuxCallback::readFilterEventData(uint32_t filterId) {
+ bool result = false;
+ DemuxFilterEvent filterEvent = mFilterIdToEvent[filterId];
+ ALOGW("[vts] reading from filter FMQ %d", filterId);
+ // todo separate filter handlers
+ for (int i = 0; i < filterEvent.events.size(); i++) {
+ DemuxFilterPesEvent event = filterEvent.events[i].pes();
+ mDataLength = event.dataLength;
+ // EXPECT_TRUE(mDataLength == goldenDataOutputBuffer.size()) << "buffer size does not
+ // match";
+
+ mDataOutputBuffer.resize(mDataLength);
+ result = mFilterIdToMQ[filterId]->read(mDataOutputBuffer.data(), mDataLength);
+ EXPECT_TRUE(result) << "can't read from Filter MQ";
+
+ /*for (int i = 0; i < mDataLength; i++) {
+ EXPECT_TRUE(goldenDataOutputBuffer[i] == mDataOutputBuffer[i]) << "data does not match";
+ }*/
}
-
- for (int i = 0; i < mDataLength; i++) {
- EXPECT_TRUE(goldenDataInputBuffer[i] == mDataOutputBuffer[i]) << "data does not match";
- }
+ mFilterIdToMQEventFlag[filterId]->wake(
+ static_cast<uint32_t>(DemuxQueueNotifyBits::DATA_CONSUMED));
+ return result;
}
// Test environment for Tuner HIDL HAL.
@@ -341,24 +495,36 @@
sp<IDescrambler> mDescrambler;
sp<IDemux> mDemux;
sp<DemuxCallback> mDemuxCallback;
- FilterMQDesc mFilterMQDescriptor;
+ MQDesc mFilterMQDescriptor;
+ MQDesc mInputMQDescriptor;
+ vector<uint32_t> mUsedFilterIds;
+
uint32_t mDemuxId;
uint32_t mFilterId;
+ pthread_t mInputThread;
+ bool mInputThreadRunning;
+
::testing::AssertionResult createFrontend(int32_t frontendId);
::testing::AssertionResult tuneFrontend(int32_t frontendId);
::testing::AssertionResult stopTuneFrontend(int32_t frontendId);
::testing::AssertionResult closeFrontend(int32_t frontendId);
::testing::AssertionResult createDemux();
- ::testing::AssertionResult createDemuxWithFrontend(int32_t frontendId);
- ::testing::AssertionResult addSectionFilterToDemux();
- ::testing::AssertionResult addPesFilterToDemux();
- ::testing::AssertionResult getFilterMQDescriptor(sp<IDemux>& demux, const uint32_t filterId);
- ::testing::AssertionResult readSectionFilterDataOutput();
- ::testing::AssertionResult readPesFilterDataOutput();
+ ::testing::AssertionResult createDemuxWithFrontend(int32_t frontendId,
+ FrontendSettings settings);
+ ::testing::AssertionResult getInputMQDescriptor();
+ ::testing::AssertionResult addInputToDemux(DemuxInputSettings setting);
+ ::testing::AssertionResult addFilterToDemux(DemuxFilterType type, DemuxFilterSettings setting);
+ ::testing::AssertionResult getFilterMQDescriptor(const uint32_t filterId);
::testing::AssertionResult closeDemux();
::testing::AssertionResult createDescrambler();
::testing::AssertionResult closeDescrambler();
+
+ ::testing::AssertionResult playbackDataFlowTest(vector<FilterConf> filterConf,
+ InputConf inputConf,
+ vector<string> goldenOutputFiles);
+ ::testing::AssertionResult broadcastDataFlowTest(vector<FilterConf> filterConf,
+ vector<string> goldenOutputFiles);
};
::testing::AssertionResult TunerHidlTest::createFrontend(int32_t frontendId) {
@@ -389,13 +555,11 @@
.frequency = 0,
.modulation = FrontendAtscModulation::UNDEFINED,
};
- frontendSettings.atsc() = frontendAtscSettings;
+ frontendSettings.atsc(frontendAtscSettings);
mFrontendCallback->testOnEvent(mFrontend, frontendSettings);
FrontendDvbtSettings frontendDvbtSettings{
.frequency = 0,
- .modulation = FrontendAtscModulation::UNDEFINED,
- .fec = FrontendInnerFec::FEC_UNDEFINED,
};
frontendSettings.dvbt(frontendDvbtSettings);
mFrontendCallback->testOnEvent(mFrontend, frontendSettings);
@@ -405,7 +569,7 @@
::testing::AssertionResult TunerHidlTest::stopTuneFrontend(int32_t frontendId) {
Result status;
- if (createFrontend(frontendId) == ::testing::AssertionFailure()) {
+ if (!mFrontend && createFrontend(frontendId) == ::testing::AssertionFailure()) {
return ::testing::AssertionFailure();
}
@@ -415,11 +579,12 @@
::testing::AssertionResult TunerHidlTest::closeFrontend(int32_t frontendId) {
Result status;
- if (createFrontend(frontendId) == ::testing::AssertionFailure()) {
+ if (!mFrontend && createFrontend(frontendId) == ::testing::AssertionFailure()) {
return ::testing::AssertionFailure();
}
status = mFrontend->close();
+ mFrontend = nullptr;
return ::testing::AssertionResult(status == Result::SUCCESS);
}
@@ -434,127 +599,33 @@
return ::testing::AssertionResult(status == Result::SUCCESS);
}
-::testing::AssertionResult TunerHidlTest::createDemuxWithFrontend(int32_t frontendId) {
+::testing::AssertionResult TunerHidlTest::createDemuxWithFrontend(int32_t frontendId,
+ FrontendSettings settings) {
Result status;
- if (createDemux() == ::testing::AssertionFailure()) {
+ if (!mDemux && createDemux() == ::testing::AssertionFailure()) {
return ::testing::AssertionFailure();
}
- if (createFrontend(frontendId) == ::testing::AssertionFailure()) {
+ if (!mFrontend && createFrontend(frontendId) == ::testing::AssertionFailure()) {
return ::testing::AssertionFailure();
}
+ mFrontendCallback->testOnEvent(mFrontend, settings);
+
status = mDemux->setFrontendDataSource(frontendId);
return ::testing::AssertionResult(status == Result::SUCCESS);
}
-::testing::AssertionResult TunerHidlTest::addSectionFilterToDemux() {
- Result status;
-
- if (createDemux() == ::testing::AssertionFailure()) {
- return ::testing::AssertionFailure();
- }
-
- // Create demux callback
- mDemuxCallback = new DemuxCallback();
-
- // Add section filter to the local demux
- mDemux->addFilter(DemuxFilterType::SECTION, FMQ_SIZE_4K, mDemuxCallback,
- [&](Result result, uint32_t filterId) {
- mFilterId = filterId;
- status = result;
- });
-
- // Add another section filter to the local demux
- mDemux->addFilter(DemuxFilterType::SECTION, FMQ_SIZE_4K, mDemuxCallback,
- [&](Result result, uint32_t filterId) {
- mFilterId = filterId;
- status = result;
- });
-
- // TODO Test configure the filter
-
- return ::testing::AssertionResult(status == Result::SUCCESS);
-}
-
-::testing::AssertionResult TunerHidlTest::addPesFilterToDemux() {
- Result status;
-
- if (createDemux() == ::testing::AssertionFailure()) {
- return ::testing::AssertionFailure();
- }
-
- // Create demux callback
- mDemuxCallback = new DemuxCallback();
-
- // Add PES filter to the local demux
- mDemux->addFilter(DemuxFilterType::PES, FMQ_SIZE_4K, mDemuxCallback,
- [&](Result result, uint32_t filterId) {
- mFilterId = filterId;
- status = result;
- });
-
- // Add another PES filter to the local demux
- mDemux->addFilter(DemuxFilterType::PES, FMQ_SIZE_4K, mDemuxCallback,
- [&](Result result, uint32_t filterId) {
- mFilterId = filterId;
- status = result;
- });
-
- // TODO Test configure the filter
-
- return ::testing::AssertionResult(status == Result::SUCCESS);
-}
-
-::testing::AssertionResult TunerHidlTest::getFilterMQDescriptor(sp<IDemux>& demux,
- const uint32_t filterId) {
- Result status;
-
- if (!demux) {
- return ::testing::AssertionFailure();
- }
-
- mDemux->getFilterQueueDesc(filterId, [&](Result result, const FilterMQDesc& filterMQDesc) {
- mFilterMQDescriptor = filterMQDesc;
- status = result;
- });
-
- return ::testing::AssertionResult(status == Result::SUCCESS);
-}
-
-::testing::AssertionResult TunerHidlTest::readSectionFilterDataOutput() {
- if (addSectionFilterToDemux() == ::testing::AssertionFailure() ||
- getFilterMQDescriptor(mDemux, mFilterId) == ::testing::AssertionFailure()) {
- return ::testing::AssertionFailure();
- }
-
- // Test start filter and read the output data
- mDemuxCallback->testOnSectionFilterEvent(mDemux, mFilterId, mFilterMQDescriptor);
-
- return ::testing::AssertionResult(true);
-}
-
-::testing::AssertionResult TunerHidlTest::readPesFilterDataOutput() {
- if (addPesFilterToDemux() == ::testing::AssertionFailure() ||
- getFilterMQDescriptor(mDemux, mFilterId) == ::testing::AssertionFailure()) {
- return ::testing::AssertionFailure();
- }
-
- // Test start filter and read the output data
- mDemuxCallback->testOnPesFilterEvent(mDemux, mFilterId, mFilterMQDescriptor);
-
- return ::testing::AssertionResult(true);
-}
-
::testing::AssertionResult TunerHidlTest::closeDemux() {
Result status;
- if (createDemux() == ::testing::AssertionFailure()) {
+ if (!mDemux && createDemux() == ::testing::AssertionFailure()) {
return ::testing::AssertionFailure();
}
status = mDemux->close();
+ mDemux = nullptr;
return ::testing::AssertionResult(status == Result::SUCCESS);
}
@@ -569,7 +640,7 @@
return ::testing::AssertionFailure();
}
- if (createDemux() == ::testing::AssertionFailure()) {
+ if (!mDemux && createDemux() == ::testing::AssertionFailure()) {
return ::testing::AssertionFailure();
}
@@ -585,14 +656,212 @@
::testing::AssertionResult TunerHidlTest::closeDescrambler() {
Result status;
- if (createDescrambler() == ::testing::AssertionFailure()) {
+ if (!mDescrambler && createDescrambler() == ::testing::AssertionFailure()) {
return ::testing::AssertionFailure();
}
status = mDescrambler->close();
+ mDescrambler = nullptr;
return ::testing::AssertionResult(status == Result::SUCCESS);
}
+::testing::AssertionResult TunerHidlTest::addInputToDemux(DemuxInputSettings setting) {
+ Result status;
+
+ if (!mDemux && createDemux() == ::testing::AssertionFailure()) {
+ return ::testing::AssertionFailure();
+ }
+
+ // Create demux callback
+ if (!mDemuxCallback) {
+ mDemuxCallback = new DemuxCallback();
+ }
+
+ // Add playback input to the local demux
+ status = mDemux->addInput(FMQ_SIZE_1M, mDemuxCallback);
+
+ if (status != Result::SUCCESS) {
+ return ::testing::AssertionFailure();
+ }
+
+ status = mDemux->configureInput(setting);
+
+ return ::testing::AssertionResult(status == Result::SUCCESS);
+}
+
+::testing::AssertionResult TunerHidlTest::getInputMQDescriptor() {
+ Result status;
+
+ if (!mDemux && createDemux() == ::testing::AssertionFailure()) {
+ return ::testing::AssertionFailure();
+ }
+
+ mDemux->getInputQueueDesc([&](Result result, const MQDesc& inputMQDesc) {
+ mInputMQDescriptor = inputMQDesc;
+ status = result;
+ });
+
+ return ::testing::AssertionResult(status == Result::SUCCESS);
+}
+
+::testing::AssertionResult TunerHidlTest::addFilterToDemux(DemuxFilterType type,
+ DemuxFilterSettings setting) {
+ Result status;
+
+ if (!mDemux && createDemux() == ::testing::AssertionFailure()) {
+ return ::testing::AssertionFailure();
+ }
+
+ // Create demux callback
+ if (!mDemuxCallback) {
+ mDemuxCallback = new DemuxCallback();
+ }
+
+ // Add filter to the local demux
+ mDemux->addFilter(type, FMQ_SIZE_4K, mDemuxCallback, [&](Result result, uint32_t filterId) {
+ // TODO use a map to save all the filter id and FMQ
+ mFilterId = filterId;
+ status = result;
+ });
+
+ if (status != Result::SUCCESS) {
+ return ::testing::AssertionFailure();
+ }
+
+ // Configure the filter
+ status = mDemux->configureFilter(mFilterId, setting);
+
+ return ::testing::AssertionResult(status == Result::SUCCESS);
+}
+
+::testing::AssertionResult TunerHidlTest::getFilterMQDescriptor(const uint32_t filterId) {
+ Result status;
+
+ if (!mDemux) {
+ return ::testing::AssertionFailure();
+ }
+
+ mDemux->getFilterQueueDesc(filterId, [&](Result result, const MQDesc& filterMQDesc) {
+ mFilterMQDescriptor = filterMQDesc;
+ status = result;
+ });
+
+ return ::testing::AssertionResult(status == Result::SUCCESS);
+}
+
+::testing::AssertionResult TunerHidlTest::playbackDataFlowTest(
+ vector<FilterConf> filterConf, InputConf inputConf, vector<string> /*goldenOutputFiles*/) {
+ Result status;
+ int filterIdsSize;
+ // Filter Configuration Module
+ for (int i = 0; i < filterConf.size(); i++) {
+ if (addFilterToDemux(filterConf[i].type, filterConf[i].setting) ==
+ ::testing::AssertionFailure() ||
+            // TODO use a map to save the FMQs/EventFlags and pass to callback
+ getFilterMQDescriptor(mFilterId) == ::testing::AssertionFailure()) {
+ return ::testing::AssertionFailure();
+ }
+ filterIdsSize = mUsedFilterIds.size();
+ mUsedFilterIds.resize(filterIdsSize + 1);
+ mUsedFilterIds[filterIdsSize] = mFilterId;
+ mDemuxCallback->updateFilterMQ(mFilterId, mFilterMQDescriptor);
+ // mDemuxCallback->updateGoldenOutputMap(mFilterId, goldenOutputFiles[i]);
+ status = mDemux->startFilter(mFilterId);
+ if (status != Result::SUCCESS) {
+ return ::testing::AssertionFailure();
+ }
+ }
+
+ // Playback Input Module
+ DemuxInputSettings inputSetting = inputConf.setting;
+ if (addInputToDemux(inputSetting) == ::testing::AssertionFailure() ||
+ getInputMQDescriptor() == ::testing::AssertionFailure()) {
+ return ::testing::AssertionFailure();
+ }
+ mDemuxCallback->startPlaybackInputThread(inputConf, mInputMQDescriptor);
+ status = mDemux->startInput();
+ if (status != Result::SUCCESS) {
+ return ::testing::AssertionFailure();
+ }
+
+ // Data Verify Module
+ mDemuxCallback->testFilterDataOutput();
+ mDemuxCallback->stopInputThread();
+
+ // Clean Up Module
+ for (int i = 0; i <= filterIdsSize; i++) {
+ if (mDemux->stopFilter(mUsedFilterIds[i]) != Result::SUCCESS) {
+ return ::testing::AssertionFailure();
+ }
+ }
+ if (mDemux->stopInput() != Result::SUCCESS) {
+ return ::testing::AssertionFailure();
+ }
+ return closeDemux();
+}
+
+::testing::AssertionResult TunerHidlTest::broadcastDataFlowTest(
+ vector<FilterConf> filterConf, vector<string> /*goldenOutputFiles*/) {
+ Result status;
+ hidl_vec<FrontendId> feIds;
+
+ mService->getFrontendIds([&](Result result, const hidl_vec<FrontendId>& frontendIds) {
+ status = result;
+ feIds = frontendIds;
+ });
+
+ if (feIds.size() == 0) {
+ ALOGW("[ WARN ] Frontend isn't available");
+ return ::testing::AssertionFailure();
+ }
+
+ FrontendDvbtSettings dvbt{
+ .frequency = 1000,
+ };
+ FrontendSettings settings;
+ settings.dvbt(dvbt);
+
+ if (createDemuxWithFrontend(feIds[0], settings) != ::testing::AssertionSuccess()) {
+ return ::testing::AssertionFailure();
+ }
+
+ int filterIdsSize;
+ // Filter Configuration Module
+ for (int i = 0; i < filterConf.size(); i++) {
+ if (addFilterToDemux(filterConf[i].type, filterConf[i].setting) ==
+ ::testing::AssertionFailure() ||
+            // TODO use a map to save the FMQs/EventFlags and pass to callback
+ getFilterMQDescriptor(mFilterId) == ::testing::AssertionFailure()) {
+ return ::testing::AssertionFailure();
+ }
+ filterIdsSize = mUsedFilterIds.size();
+ mUsedFilterIds.resize(filterIdsSize + 1);
+ mUsedFilterIds[filterIdsSize] = mFilterId;
+ mDemuxCallback->updateFilterMQ(mFilterId, mFilterMQDescriptor);
+ status = mDemux->startFilter(mFilterId);
+ if (status != Result::SUCCESS) {
+ return ::testing::AssertionFailure();
+ }
+ }
+
+ // Data Verify Module
+ mDemuxCallback->testFilterDataOutput();
+
+ // Clean Up Module
+ for (int i = 0; i <= filterIdsSize; i++) {
+ if (mDemux->stopFilter(mUsedFilterIds[i]) != Result::SUCCESS) {
+ return ::testing::AssertionFailure();
+ }
+ }
+ if (mFrontend->stopTune() != Result::SUCCESS) {
+ return ::testing::AssertionFailure();
+ }
+ return closeDemux();
+}
+
+/*
+ * API STATUS TESTS
+ */
TEST_F(TunerHidlTest, CreateFrontend) {
Result status;
hidl_vec<FrontendId> feIds;
@@ -673,13 +942,7 @@
}
}
-TEST_F(TunerHidlTest, CreateDemux) {
- description("Create Demux");
-
- ASSERT_TRUE(createDemux());
-}
-
-TEST_F(TunerHidlTest, CreateDemuxWithFrontend) {
+/*TEST_F(TunerHidlTest, CreateDemuxWithFrontend) {
Result status;
hidl_vec<FrontendId> feIds;
@@ -694,55 +957,100 @@
return;
}
+ FrontendDvbtSettings dvbt{
+ .frequency = 1000,
+ };
+ FrontendSettings settings;
+ settings.dvbt(dvbt);
+
for (size_t i = 0; i < feIds.size(); i++) {
- ASSERT_TRUE(createDemuxWithFrontend(feIds[i]));
+ ASSERT_TRUE(createDemuxWithFrontend(feIds[i], settings));
+ mFrontend->stopTune();
}
-}
+}*/
-TEST_F(TunerHidlTest, AddSectionFilterToDemux) {
- description("Add a section filter to a created demux");
- ASSERT_TRUE(addSectionFilterToDemux());
-}
-
-TEST_F(TunerHidlTest, AddPesFilterToDemux) {
- description("Add a pes filter to a created demux");
- ASSERT_TRUE(addPesFilterToDemux());
-}
-
-TEST_F(TunerHidlTest, GetFilterMQDescriptor) {
- description("Get MQ Descriptor from a created filter");
- ASSERT_TRUE(addSectionFilterToDemux());
- ASSERT_TRUE(getFilterMQDescriptor(mDemux, mFilterId));
-}
-
-TEST_F(TunerHidlTest, ReadSectionFilterOutput) {
- description("Read data output from FMQ of a Section Filter");
- ASSERT_TRUE(readSectionFilterDataOutput());
-}
-
-TEST_F(TunerHidlTest, ReadPesFilterOutput) {
- description("Read data output from FMQ of a PES Filter");
- ASSERT_TRUE(readPesFilterDataOutput());
+TEST_F(TunerHidlTest, CreateDemux) {
+ description("Create Demux");
+ ASSERT_TRUE(createDemux());
}
TEST_F(TunerHidlTest, CloseDemux) {
description("Close Demux");
-
ASSERT_TRUE(closeDemux());
}
TEST_F(TunerHidlTest, CreateDescrambler) {
description("Create Descrambler");
-
ASSERT_TRUE(createDescrambler());
}
TEST_F(TunerHidlTest, CloseDescrambler) {
description("Close Descrambler");
-
ASSERT_TRUE(closeDescrambler());
}
+/*
+ * DATA FLOW TESTS
+ */
+TEST_F(TunerHidlTest, PlaybackDataFlowWithPesFilterTest) {
+ description("Feed ts data from playback and configure pes filter to get output");
+
+    // TODO: modularize the filter conf parser
+ vector<FilterConf> filterConf;
+ filterConf.resize(1);
+
+ DemuxFilterSettings filterSetting;
+ DemuxFilterPesDataSettings pesFilterSetting{
+ .tpid = 18,
+ };
+ filterSetting.pesData(pesFilterSetting);
+ FilterConf pesFilterConf{
+ .type = DemuxFilterType::PES,
+ .setting = filterSetting,
+ };
+ filterConf[0] = pesFilterConf;
+
+ DemuxInputSettings inputSetting{
+ .statusMask = 0xf,
+ .lowThreshold = 0x1000,
+ .highThreshold = 0x07fff,
+ .dataFormat = DemuxDataFormat::TS,
+ .packetSize = 188,
+ };
+
+ InputConf inputConf{
+ .inputDataFile = "/vendor/etc/test1.ts",
+ .setting = inputSetting,
+ };
+
+ vector<string> goldenOutputFiles;
+
+ ASSERT_TRUE(playbackDataFlowTest(filterConf, inputConf, goldenOutputFiles));
+}
+
+TEST_F(TunerHidlTest, BroadcastDataFlowWithPesFilterTest) {
+ description("Feed ts data from frontend and test with PES filter");
+
+    // TODO: modularize the filter conf parser
+ vector<FilterConf> filterConf;
+ filterConf.resize(1);
+
+ DemuxFilterSettings filterSetting;
+ DemuxFilterPesDataSettings pesFilterSetting{
+ .tpid = 18,
+ };
+ filterSetting.pesData(pesFilterSetting);
+ FilterConf pesFilterConf{
+ .type = DemuxFilterType::PES,
+ .setting = filterSetting,
+ };
+ filterConf[0] = pesFilterConf;
+
+ vector<string> goldenOutputFiles;
+
+ ASSERT_TRUE(broadcastDataFlowTest(filterConf, goldenOutputFiles));
+}
+
} // namespace
int main(int argc, char** argv) {
diff --git a/tv/tuner/README.md b/tv/tuner/README.md
new file mode 100644
index 0000000..aa1f62d
--- /dev/null
+++ b/tv/tuner/README.md
@@ -0,0 +1,12 @@
+# Tuner HALs
+
+## Overview
+
+TV-specific tuners.
+
+See 1.0/ITuner.hal for an overview.
+
+*** note
+**Warning:** The HALs are not (yet) frozen, as the HAL definition is
+expected to evolve between Android releases.
+***
diff --git a/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.rc b/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.rc
deleted file mode 100644
index ed7a562..0000000
--- a/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.rc
+++ /dev/null
@@ -1,4 +0,0 @@
-service vendor.vibrator-1-3 /vendor/bin/hw/android.hardware.vibrator@1.3-service.example
- class hal
- user system
- group system
diff --git a/vibrator/1.4/Android.bp b/vibrator/1.4/Android.bp
new file mode 100644
index 0000000..cf31fcd
--- /dev/null
+++ b/vibrator/1.4/Android.bp
@@ -0,0 +1,22 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.vibrator@1.4",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "types.hal",
+ "IVibrator.hal",
+ "IVibratorCallback.hal",
+ ],
+ interfaces: [
+ "android.hardware.vibrator@1.0",
+ "android.hardware.vibrator@1.1",
+ "android.hardware.vibrator@1.2",
+ "android.hardware.vibrator@1.3",
+ "android.hidl.base@1.0",
+ ],
+ gen_java: true,
+}
diff --git a/vibrator/1.4/IVibrator.hal b/vibrator/1.4/IVibrator.hal
new file mode 100644
index 0000000..913abe3
--- /dev/null
+++ b/vibrator/1.4/IVibrator.hal
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.vibrator@1.4;
+
+import @1.0::EffectStrength;
+import @1.3::Effect;
+import @1.0::Status;
+import @1.3::IVibrator;
+import IVibratorCallback;
+
+interface IVibrator extends @1.3::IVibrator {
+ /**
+ * Determine capabilities of the vibrator HAL.
+ */
+ getCapabilities() generates (bitfield<Capabilities> capabilities);
+
+ /**
+ * Turn on vibrator
+ *
+ * This function must only be called after the previous timeout has expired or
+ * was canceled (through off()).
+ * @param timeoutMs number of milliseconds to vibrate.
+ * @param callback A callback used to inform Frameworks of state change, if supported.
+ * @return vibratorOnRet whether vibrator command was successful or not.
+ */
+ on_1_4(uint32_t timeoutMs, IVibratorCallback callback) generates (Status vibratorOnRet);
+
+ /**
+ * Fire off a predefined haptic event.
+ *
+ * @param effect The type of haptic event to trigger.
+ * @param strength The intensity of haptic event to trigger.
+ * @param callback A callback used to inform Frameworks of state change, if supported.
+ * @return status Whether the effect was successfully performed or not. Must
+ * return Status::UNSUPPORTED_OPERATION if the effect is not supported.
+ * @return lengthMs The length of time the event is expected to take in
+ * milliseconds. This doesn't need to be perfectly accurate, but should be a reasonable
+ * approximation. Should be a positive, non-zero value if the returned status is Status::OK,
+ * and set to 0 otherwise.
+ */
+ perform_1_4(Effect effect, EffectStrength strength, IVibratorCallback callback)
+ generates (Status status, uint32_t lengthMs);
+};
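
A minimal client-side sketch (not part of this change) of the 1.4 additions above; the new VTS test later in this patch exercises the same calls with full error checking, which is elided here, and the function and class names are illustrative:

    #include <android/hardware/vibrator/1.4/IVibrator.h>

    using ::android::sp;
    using ::android::hardware::hidl_bitfield;
    using ::android::hardware::Return;
    using ::android::hardware::Void;
    using ::android::hardware::vibrator::V1_0::Status;
    using ::android::hardware::vibrator::V1_4::Capabilities;
    using ::android::hardware::vibrator::V1_4::IVibrator;
    using ::android::hardware::vibrator::V1_4::IVibratorCallback;

    // Client-side callback implementation; onComplete() fires when the vibration ends.
    struct DoneCallback : public IVibratorCallback {
        Return<void> onComplete() override { return Void(); }
    };

    void vibrateWithCompletion(const sp<IVibrator>& vibrator) {
        hidl_bitfield<Capabilities> caps = vibrator->getCapabilities();
        sp<IVibratorCallback> callback;
        // Only pass a callback if the HAL advertises completion-callback support.
        if (caps & Capabilities::ON_COMPLETION_CALLBACK) {
            callback = new DoneCallback();
        }
        Status status = vibrator->on_1_4(250 /* timeoutMs */, callback);
        (void)status;
    }
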
diff --git a/vibrator/1.4/IVibratorCallback.hal b/vibrator/1.4/IVibratorCallback.hal
new file mode 100644
index 0000000..76281bc
--- /dev/null
+++ b/vibrator/1.4/IVibratorCallback.hal
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.vibrator@1.4;
+
+interface IVibratorCallback {
+ oneway onComplete();
+};
diff --git a/vibrator/1.4/types.hal b/vibrator/1.4/types.hal
new file mode 100644
index 0000000..acc49b1
--- /dev/null
+++ b/vibrator/1.4/types.hal
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.vibrator@1.4;
+
+enum Capabilities : uint32_t {
+ ON_COMPLETION_CALLBACK = 1 << 0,
+ PERFORM_COMPLETION_CALLBACK = 1 << 1,
+};
diff --git a/vibrator/1.3/example/Android.bp b/vibrator/1.4/vts/functional/Android.bp
similarity index 65%
copy from vibrator/1.3/example/Android.bp
copy to vibrator/1.4/vts/functional/Android.bp
index 07f1c26..4cdf3b6 100644
--- a/vibrator/1.3/example/Android.bp
+++ b/vibrator/1.4/vts/functional/Android.bp
@@ -12,22 +12,19 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
+//
-cc_binary {
- name: "android.hardware.vibrator@1.3-service.example",
- vendor: true,
- relative_install_path: "hw",
- init_rc: ["android.hardware.vibrator@1.3-service.example.rc"],
- vintf_fragments: ["android.hardware.vibrator@1.3-service.example.xml"],
- srcs: ["service.cpp", "Vibrator.cpp"],
- cflags: ["-Wall", "-Werror"],
- shared_libs: [
- "libhidlbase",
- "liblog",
- "libutils",
+cc_test {
+ name: "VtsHalVibratorV1_4TargetTest",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: ["VtsHalVibratorV1_4TargetTest.cpp"],
+ static_libs: [
"android.hardware.vibrator@1.0",
"android.hardware.vibrator@1.1",
"android.hardware.vibrator@1.2",
"android.hardware.vibrator@1.3",
+ "android.hardware.vibrator@1.4",
],
+ test_suites: ["general-tests"],
}
+
diff --git a/vibrator/1.4/vts/functional/VtsHalVibratorV1_4TargetTest.cpp b/vibrator/1.4/vts/functional/VtsHalVibratorV1_4TargetTest.cpp
new file mode 100644
index 0000000..1b6abe9
--- /dev/null
+++ b/vibrator/1.4/vts/functional/VtsHalVibratorV1_4TargetTest.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "vibrator_hidl_hal_test"
+
+#include <android-base/logging.h>
+#include <android/hardware/vibrator/1.0/types.h>
+#include <android/hardware/vibrator/1.4/IVibrator.h>
+#include <gtest/gtest.h>
+#include <hidl/GtestPrinter.h>
+#include <hidl/ServiceManagement.h>
+
+#include <getopt.h>
+#include <unistd.h>
+
+#include <future>
+
+using ::android::sp;
+using ::android::hardware::hidl_bitfield;
+using ::android::hardware::hidl_enum_range;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::vibrator::V1_0::EffectStrength;
+using ::android::hardware::vibrator::V1_0::Status;
+using ::android::hardware::vibrator::V1_3::Effect;
+using ::android::hardware::vibrator::V1_4::Capabilities;
+using ::android::hardware::vibrator::V1_4::IVibrator;
+using ::android::hardware::vibrator::V1_4::IVibratorCallback;
+
+static uint32_t sCompletionLimitMs = UINT32_MAX;
+
+#define EXPECT_OK(ret) ASSERT_TRUE((ret).isOk())
+
+class CompletionCallback : public IVibratorCallback {
+ public:
+ CompletionCallback(std::function<void()> callback) : mCallback(callback) {}
+ Return<void> onComplete() override {
+ mCallback();
+ return Void();
+ }
+
+ private:
+ std::function<void()> mCallback;
+};
+
+class VibratorHidlTest_1_4 : public testing::TestWithParam<std::string> {
+ public:
+ virtual void SetUp() override {
+ vibrator = IVibrator::getService(GetParam());
+ ASSERT_NE(vibrator, nullptr);
+ capabilities = vibrator->getCapabilities();
+ }
+
+ virtual void TearDown() override {}
+
+ sp<IVibrator> vibrator;
+ hidl_bitfield<Capabilities> capabilities;
+};
+
+TEST_P(VibratorHidlTest_1_4, OnWithCallback) {
+ if (capabilities & Capabilities::ON_COMPLETION_CALLBACK) {
+ std::promise<void> completionPromise;
+ std::future<void> completionFuture{completionPromise.get_future()};
+ sp<CompletionCallback> callback =
+ new CompletionCallback([&completionPromise] { completionPromise.set_value(); });
+ uint32_t duration = 250;
+ std::chrono::milliseconds timeout{duration * 2};
+ EXPECT_EQ(Status::OK, vibrator->on_1_4(duration, callback));
+ EXPECT_EQ(completionFuture.wait_for(timeout), std::future_status::ready);
+ vibrator->off();
+ }
+}
+
+static void validatePerformEffectUnsupportedOperation(Status status, uint32_t lengthMs) {
+ ASSERT_EQ(Status::UNSUPPORTED_OPERATION, status);
+ ASSERT_EQ(static_cast<uint32_t>(0), lengthMs)
+ << "Effects that return UNSUPPORTED_OPERATION must have a duration of zero";
+}
+
+static void validatePerformEffect(Status status, uint32_t lengthMs) {
+ ASSERT_TRUE(status == Status::OK || status == Status::UNSUPPORTED_OPERATION);
+ if (status == Status::OK) {
+ ASSERT_LT(static_cast<uint32_t>(0), lengthMs)
+ << "Effects that return OK must return a positive duration";
+ } else {
+ validatePerformEffectUnsupportedOperation(status, lengthMs);
+ }
+}
+
+/*
+ * Test to make sure effects within the valid range are either supported and return OK with
+ * a valid duration, or are unsupported and return UNSUPPORTED_OPERATION with a duration of 0.
+ */
+TEST_P(VibratorHidlTest_1_4, PerformEffect_1_4) {
+ Status performStatus;
+ uint32_t performLength;
+ auto validateWrapper = [&](Status status, uint32_t lengthMs) {
+ performStatus = status;
+ performLength = lengthMs;
+ validatePerformEffect(status, lengthMs);
+ };
+ for (const auto& effect : hidl_enum_range<Effect>()) {
+ for (const auto& strength : hidl_enum_range<EffectStrength>()) {
+ std::promise<void> completionPromise;
+ std::future<void> completionFuture{completionPromise.get_future()};
+ sp<CompletionCallback> callback =
+ new CompletionCallback([&completionPromise] { completionPromise.set_value(); });
+ EXPECT_OK(vibrator->perform_1_4(effect, strength, callback, validateWrapper));
+ if (performStatus == Status::OK && performLength < sCompletionLimitMs &&
+ (capabilities & Capabilities::PERFORM_COMPLETION_CALLBACK)) {
+ std::chrono::milliseconds timeout{performLength * 2};
+ EXPECT_EQ(completionFuture.wait_for(timeout), std::future_status::ready);
+ }
+ }
+ }
+}
+
+/*
+ * Test to make sure effect values above the valid range are rejected.
+ */
+TEST_P(VibratorHidlTest_1_4, PerformEffect_1_4_BadEffects_AboveValidRange) {
+ Effect effect = *std::prev(hidl_enum_range<Effect>().end());
+ Effect badEffect = static_cast<Effect>(static_cast<int32_t>(effect) + 1);
+ EXPECT_OK(vibrator->perform_1_4(badEffect, EffectStrength::LIGHT, nullptr,
+ validatePerformEffectUnsupportedOperation));
+}
+
+/*
+ * Test to make sure effect values below the valid range are rejected.
+ */
+TEST_P(VibratorHidlTest_1_4, PerformEffect_1_4_BadEffects_BelowValidRange) {
+ Effect effect = *hidl_enum_range<Effect>().begin();
+ Effect badEffect = static_cast<Effect>(static_cast<int32_t>(effect) - 1);
+ EXPECT_OK(vibrator->perform_1_4(badEffect, EffectStrength::LIGHT, nullptr,
+ validatePerformEffectUnsupportedOperation));
+}
+
+/*
+ * Test to make sure strength values above the valid range are rejected.
+ */
+TEST_P(VibratorHidlTest_1_4, PerformEffect_1_4_BadStrength_AboveValidRange) {
+ EffectStrength strength = *std::prev(hidl_enum_range<EffectStrength>().end());
+ EffectStrength badStrength = static_cast<EffectStrength>(static_cast<int32_t>(strength) + 1);
+ EXPECT_OK(vibrator->perform_1_4(Effect::THUD, badStrength, nullptr,
+ validatePerformEffectUnsupportedOperation));
+}
+
+/*
+ * Test to make sure strength values below the valid range are rejected.
+ */
+TEST_P(VibratorHidlTest_1_4, PerformEffect_1_4_BadStrength_BelowValidRange) {
+ EffectStrength strength = *hidl_enum_range<EffectStrength>().begin();
+ EffectStrength badStrength = static_cast<EffectStrength>(static_cast<int32_t>(strength) - 1);
+ EXPECT_OK(vibrator->perform_1_4(Effect::THUD, badStrength, nullptr,
+ validatePerformEffectUnsupportedOperation));
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ PerInstance, VibratorHidlTest_1_4,
+ testing::ValuesIn(android::hardware::getAllHalInstanceNames(IVibrator::descriptor)),
+ android::hardware::PrintInstanceNameToString);
+
+enum {
+ OPTION_COMPLETION_LIMIT_MS,
+};
+
+int main(int argc, char** argv) {
+ struct option options[] = {
+ {"completion-limit-ms", required_argument, 0, OPTION_COMPLETION_LIMIT_MS}, {}};
+
+ printf("Running main() from %s\n", __FILE__);
+ testing::InitGoogleTest(&argc, argv);
+
+ while (true) {
+ int opt = getopt_long(argc, argv, "", options, nullptr);
+ if (opt == -1) {
+ break;
+ }
+ switch (opt) {
+ case OPTION_COMPLETION_LIMIT_MS:
+ std::istringstream(optarg) >> sCompletionLimitMs;
+ break;
+ default:
+ printf("Unrecognized option\n");
+ return -EINVAL;
+ }
+ }
+
+ return RUN_ALL_TESTS();
+}
diff --git a/vibrator/1.3/example/Android.bp b/vibrator/1.x/example/Android.bp
similarity index 81%
rename from vibrator/1.3/example/Android.bp
rename to vibrator/1.x/example/Android.bp
index 07f1c26..afbbb75 100644
--- a/vibrator/1.3/example/Android.bp
+++ b/vibrator/1.x/example/Android.bp
@@ -14,11 +14,11 @@
// limitations under the License.
cc_binary {
- name: "android.hardware.vibrator@1.3-service.example",
+ name: "android.hardware.vibrator@1.x-service.example",
vendor: true,
relative_install_path: "hw",
- init_rc: ["android.hardware.vibrator@1.3-service.example.rc"],
- vintf_fragments: ["android.hardware.vibrator@1.3-service.example.xml"],
+ init_rc: ["android.hardware.vibrator@1.x-service.example.rc"],
+ vintf_fragments: ["android.hardware.vibrator@1.x-service.example.xml"],
srcs: ["service.cpp", "Vibrator.cpp"],
cflags: ["-Wall", "-Werror"],
shared_libs: [
@@ -29,5 +29,6 @@
"android.hardware.vibrator@1.1",
"android.hardware.vibrator@1.2",
"android.hardware.vibrator@1.3",
+ "android.hardware.vibrator@1.4",
],
}
diff --git a/vibrator/1.3/example/OWNERS b/vibrator/1.x/example/OWNERS
similarity index 100%
rename from vibrator/1.3/example/OWNERS
rename to vibrator/1.x/example/OWNERS
diff --git a/vibrator/1.3/example/Vibrator.cpp b/vibrator/1.x/example/Vibrator.cpp
similarity index 86%
rename from vibrator/1.3/example/Vibrator.cpp
rename to vibrator/1.x/example/Vibrator.cpp
index b529437..4dd1cb9 100644
--- a/vibrator/1.3/example/Vibrator.cpp
+++ b/vibrator/1.x/example/Vibrator.cpp
@@ -23,7 +23,7 @@
namespace android {
namespace hardware {
namespace vibrator {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
static constexpr uint32_t MS_PER_S = 1000;
@@ -100,7 +100,25 @@
}
}
-Return<void> Vibrator::perform_1_3(Effect effect, EffectStrength strength, perform_cb _hidl_cb) {
+Return<void> Vibrator::perform_1_3(V1_3::Effect effect, EffectStrength strength,
+ perform_cb _hidl_cb) {
+ return perform<decltype(effect)>(effect, strength, _hidl_cb);
+}
+
+// Methods from ::android::hardware::vibrator::V1_4::IVibrator follow.
+
+Return<hidl_bitfield<Capabilities>> Vibrator::getCapabilities() {
+ return Capabilities::ON_COMPLETION_CALLBACK | Capabilities::PERFORM_COMPLETION_CALLBACK;
+}
+
+Return<Status> Vibrator::on_1_4(uint32_t timeoutMs, const sp<IVibratorCallback>& callback) {
+ mCallback = callback;
+ return on(timeoutMs);
+}
+
+Return<void> Vibrator::perform_1_4(V1_3::Effect effect, EffectStrength strength,
+ const sp<IVibratorCallback>& callback, perform_cb _hidl_cb) {
+ mCallback = callback;
return perform<decltype(effect)>(effect, strength, _hidl_cb);
}
@@ -148,6 +166,14 @@
return Status::UNSUPPORTED_OPERATION;
} else {
ALOGI("Enabled: %s -> %s\n", mEnabled ? "true" : "false", enabled ? "true" : "false");
+ if (mEnabled && !enabled) {
+ if (auto callback = mCallback) {
+ mCallback = nullptr;
+ if (auto ret = callback->onComplete(); !ret.isOk()) {
+ ALOGE("Failed completion callback: %s", ret.description().c_str());
+ }
+ }
+ }
mEnabled = enabled;
return Status::OK;
}
@@ -271,7 +297,7 @@
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace vibrator
} // namespace hardware
} // namespace android
diff --git a/vibrator/1.3/example/Vibrator.h b/vibrator/1.x/example/Vibrator.h
similarity index 75%
rename from vibrator/1.3/example/Vibrator.h
rename to vibrator/1.x/example/Vibrator.h
index 5180774..ff63431 100644
--- a/vibrator/1.3/example/Vibrator.h
+++ b/vibrator/1.x/example/Vibrator.h
@@ -13,20 +13,21 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_VIBRATOR_V1_3_VIBRATOR_H
-#define ANDROID_HARDWARE_VIBRATOR_V1_3_VIBRATOR_H
+#ifndef ANDROID_HARDWARE_VIBRATOR_V1_x_VIBRATOR_H
+#define ANDROID_HARDWARE_VIBRATOR_V1_x_VIBRATOR_H
-#include <android/hardware/vibrator/1.3/IVibrator.h>
+#include <android/hardware/vibrator/1.4/IVibrator.h>
#include <hidl/Status.h>
namespace android {
namespace hardware {
namespace vibrator {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using android::hardware::vibrator::V1_0::EffectStrength;
using android::hardware::vibrator::V1_0::Status;
+using android::hardware::vibrator::V1_3::Effect;
class Vibrator : public IVibrator {
public:
@@ -51,7 +52,14 @@
// Methods from ::android::hardware::vibrator::V1_3::IVibrator follow.
Return<bool> supportsExternalControl() override;
Return<Status> setExternalControl(bool enabled) override;
- Return<void> perform_1_3(Effect effect, EffectStrength strength, perform_cb _hidl_cb) override;
+ Return<void> perform_1_3(V1_3::Effect effect, EffectStrength strength,
+ perform_cb _hidl_cb) override;
+
+ // Methods from ::android::hardware::vibrator::V1_4::IVibrator follow.
+ Return<hidl_bitfield<Capabilities>> getCapabilities() override;
+ Return<Status> on_1_4(uint32_t timeoutMs, const sp<IVibratorCallback>& callback) override;
+ Return<void> perform_1_4(V1_3::Effect effect, EffectStrength strength,
+ const sp<IVibratorCallback>& callback, perform_cb _hidl_cb) override;
private:
Return<void> perform(Effect effect, EffectStrength strength, perform_cb _hidl_cb);
@@ -72,11 +80,12 @@
bool mExternalControl{false};
std::mutex mMutex;
timer_t mTimer{nullptr};
+ sp<IVibratorCallback> mCallback{nullptr};
};
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace vibrator
} // namespace hardware
} // namespace android
-#endif // ANDROID_HARDWARE_VIBRATOR_V1_3_VIBRATOR_H
+#endif // ANDROID_HARDWARE_VIBRATOR_V1_x_VIBRATOR_H
diff --git a/vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.rc b/vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.rc
new file mode 100644
index 0000000..4893db6
--- /dev/null
+++ b/vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.rc
@@ -0,0 +1,4 @@
+service vendor.vibrator-1-x /vendor/bin/hw/android.hardware.vibrator@1.x-service.example
+ class hal
+ user system
+ group system
diff --git a/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.xml b/vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.xml
similarity index 89%
rename from vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.xml
rename to vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.xml
index 172aa21..ebc8c4b 100644
--- a/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.xml
+++ b/vibrator/1.x/example/android.hardware.vibrator@1.x-service.example.xml
@@ -2,7 +2,7 @@
<hal format="hidl">
<name>android.hardware.vibrator</name>
<transport>hwbinder</transport>
- <version>1.3</version>
+ <version>1.4</version>
<interface>
<name>IVibrator</name>
<instance>default</instance>
diff --git a/vibrator/1.3/example/service.cpp b/vibrator/1.x/example/service.cpp
similarity index 82%
rename from vibrator/1.3/example/service.cpp
rename to vibrator/1.x/example/service.cpp
index 449996e..13c6691 100644
--- a/vibrator/1.3/example/service.cpp
+++ b/vibrator/1.x/example/service.cpp
@@ -13,17 +13,17 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-#define LOG_TAG "android.hardware.vibrator@1.3-service.example"
+#define LOG_TAG "android.hardware.vibrator@1.x-service.example"
-#include <android/hardware/vibrator/1.3/IVibrator.h>
+#include <android/hardware/vibrator/1.4/IVibrator.h>
#include <hidl/HidlTransportSupport.h>
#include "Vibrator.h"
using android::hardware::configureRpcThreadpool;
using android::hardware::joinRpcThreadpool;
-using android::hardware::vibrator::V1_3::IVibrator;
-using android::hardware::vibrator::V1_3::implementation::Vibrator;
+using android::hardware::vibrator::V1_4::IVibrator;
+using android::hardware::vibrator::V1_4::implementation::Vibrator;
using namespace android;
status_t registerVibratorService() {
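The service.cpp hunk stops at the registerVibratorService() signature; the body itself is untouched by the rename. For orientation, a typical hwbinder registration for this kind of example service looks like the sketch below (standard HIDL boilerplate under the "default" instance name, not quoted from the commit):

// Sketch of a typical registration/main body (assumptions noted above).
status_t registerVibratorService() {
    sp<IVibrator> vibrator = new Vibrator();  // V1_4 implementation, per the using-declarations above
    return vibrator->registerAsService();     // publishes android.hardware.vibrator@1.4::IVibrator/default
}

int main() {
    configureRpcThreadpool(1, true /* callerWillJoin */);
    if (registerVibratorService() != OK) {
        return 1;
    }
    joinRpcThreadpool();
    return 1;  // joinRpcThreadpool() is not expected to return
}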
diff --git a/wifi/1.0/vts/functional/Android.bp b/wifi/1.0/vts/functional/Android.bp
index 397ad17..6fa6e7e 100644
--- a/wifi/1.0/vts/functional/Android.bp
+++ b/wifi/1.0/vts/functional/Android.bp
@@ -28,7 +28,9 @@
shared_libs: [
"libnativehelper",
],
- static_libs: ["android.hardware.wifi@1.0"],
+ static_libs: [
+ "android.hardware.wifi@1.0",
+ ],
}
cc_test {
@@ -36,7 +38,6 @@
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"VtsHalWifiV1_0TargetTest.cpp",
- "wifi_ap_iface_hidl_test.cpp",
"wifi_chip_hidl_test.cpp",
"wifi_p2p_iface_hidl_test.cpp",
"wifi_rtt_controller_hidl_test.cpp",
@@ -52,11 +53,14 @@
test_suites: ["general-tests"],
}
+// These tests are split out so that they can be conditioned on presence of the
+// "android.hardware.wifi.aware" feature.
cc_test {
name: "VtsHalWifiNanV1_0TargetTest",
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"VtsHalWifiV1_0TargetTest.cpp",
+ "wifi_chip_hidl_nan_test.cpp",
"wifi_nan_iface_hidl_test.cpp",
],
static_libs: [
@@ -65,3 +69,20 @@
],
test_suites: ["general-tests"],
}
+
+// These tests are split out so that they can be conditioned on presence of
+// the hostapd HAL, which indicates SoftAP support.
+cc_test {
+ name: "VtsHalWifiApV1_0TargetTest",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: [
+ "VtsHalWifiV1_0TargetTest.cpp",
+ "wifi_ap_iface_hidl_test.cpp",
+ "wifi_chip_hidl_ap_test.cpp",
+ ],
+ static_libs: [
+ "VtsHalWifiV1_0TargetTestUtil",
+ "android.hardware.wifi@1.0",
+ ],
+ test_suites: ["general-tests"],
+}
diff --git a/wifi/1.0/vts/functional/VtsHalWifiV1_0TargetTest.cpp b/wifi/1.0/vts/functional/VtsHalWifiV1_0TargetTest.cpp
index e7b8593..9d25014 100644
--- a/wifi/1.0/vts/functional/VtsHalWifiV1_0TargetTest.cpp
+++ b/wifi/1.0/vts/functional/VtsHalWifiV1_0TargetTest.cpp
@@ -41,10 +41,7 @@
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
gEnv->init(&argc, argv);
- int status = gEnv->initFromOptions(argc, argv);
- if (status == 0) {
- status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- }
+ int status = RUN_ALL_TESTS();
+ LOG(INFO) << "Test result = " << status;
return status;
}
diff --git a/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp b/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp
index e5762f2..c55221d 100644
--- a/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_ap_iface_hidl_test.cpp
@@ -29,21 +29,17 @@
using ::android::hardware::wifi::V1_0::WifiStatusCode;
using ::android::sp;
-extern WifiHidlEnvironment* gEnv;
-
/**
* Fixture to use for all AP Iface HIDL interface tests.
*/
class WifiApIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
public:
virtual void SetUp() override {
- if (!gEnv->isSoftApOn) return;
wifi_ap_iface_ = getWifiApIface();
ASSERT_NE(nullptr, wifi_ap_iface_.get());
}
virtual void TearDown() override {
- if (!gEnv->isSoftApOn) return;
stopWifi();
}
@@ -57,7 +53,6 @@
* successfully created.
*/
TEST(WifiApIfaceHidlTestNoFixture, Create) {
- if (!gEnv->isSoftApOn) return;
EXPECT_NE(nullptr, getWifiApIface().get());
stopWifi();
}
@@ -67,7 +62,6 @@
* Ensures that the correct interface type is returned for AP interface.
*/
TEST_F(WifiApIfaceHidlTest, GetType) {
- if (!gEnv->isSoftApOn) return;
const auto& status_and_type = HIDL_INVOKE(wifi_ap_iface_, getType);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_type.first.code);
EXPECT_EQ(IfaceType::AP, status_and_type.second);
@@ -79,7 +73,6 @@
* status code.
*/
TEST_F(WifiApIfaceHidlTest, SetCountryCode) {
- if (!gEnv->isSoftApOn) return;
const android::hardware::hidl_array<int8_t, 2> kCountryCode{
std::array<int8_t, 2>{{0x55, 0x53}}};
EXPECT_EQ(WifiStatusCode::SUCCESS,
@@ -91,7 +84,6 @@
* Ensures that we can retrieve valid frequencies for 2.4 GHz band.
*/
TEST_F(WifiApIfaceHidlTest, GetValidFrequenciesForBand) {
- if (!gEnv->isSoftApOn) return;
const auto& status_and_freqs = HIDL_INVOKE(
wifi_ap_iface_, getValidFrequenciesForBand, WifiBand::BAND_24GHZ);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_freqs.first.code);
diff --git a/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp b/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp
new file mode 100644
index 0000000..232ffdd
--- /dev/null
+++ b/wifi/1.0/vts/functional/wifi_chip_hidl_ap_test.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+
+#include <android/hardware/wifi/1.0/IWifiChip.h>
+
+#include <VtsHalHidlTargetTestBase.h>
+
+#include "wifi_hidl_call_util.h"
+#include "wifi_hidl_test_utils.h"
+
+using ::android::sp;
+using ::android::hardware::wifi::V1_0::ChipModeId;
+using ::android::hardware::wifi::V1_0::IfaceType;
+using ::android::hardware::wifi::V1_0::IWifiApIface;
+using ::android::hardware::wifi::V1_0::IWifiChip;
+using ::android::hardware::wifi::V1_0::IWifiIface;
+using ::android::hardware::wifi::V1_0::WifiStatus;
+using ::android::hardware::wifi::V1_0::WifiStatusCode;
+
+/**
+ * Fixture for IWifiChip tests that are conditioned on SoftAP support.
+ */
+class WifiChipHidlApTest : public ::testing::VtsHalHidlTargetTestBase {
+ public:
+ virtual void SetUp() override {
+ wifi_chip_ = getWifiChip();
+ ASSERT_NE(nullptr, wifi_chip_.get());
+ }
+
+ virtual void TearDown() override { stopWifi(); }
+
+ protected:
+ // Helper function to configure the Chip in one of the supported modes.
+ // Most of the non-mode-configuration-related methods require chip
+ // to be first configured.
+ ChipModeId configureChipForIfaceType(IfaceType type, bool expectSuccess) {
+ ChipModeId mode_id;
+ EXPECT_EQ(expectSuccess,
+ configureChipToSupportIfaceType(wifi_chip_, type, &mode_id));
+ return mode_id;
+ }
+
+ std::string getIfaceName(const sp<IWifiIface>& iface) {
+ const auto& status_and_name = HIDL_INVOKE(iface, getName);
+ EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_name.first.code);
+ return status_and_name.second;
+ }
+
+ WifiStatusCode createApIface(sp<IWifiApIface>* ap_iface) {
+ const auto& status_and_iface = HIDL_INVOKE(wifi_chip_, createApIface);
+ *ap_iface = status_and_iface.second;
+ return status_and_iface.first.code;
+ }
+
+ WifiStatusCode removeApIface(const std::string& name) {
+ return HIDL_INVOKE(wifi_chip_, removeApIface, name).code;
+ }
+
+ sp<IWifiChip> wifi_chip_;
+};
+
+/*
+ * CreateApIface
+ * Configures the chip in AP mode and ensures that at least 1 iface creation
+ * succeeds.
+ */
+TEST_F(WifiChipHidlApTest, CreateApIface) {
+ configureChipForIfaceType(IfaceType::AP, true);
+
+ sp<IWifiApIface> iface;
+ EXPECT_EQ(WifiStatusCode::SUCCESS, createApIface(&iface));
+ EXPECT_NE(nullptr, iface.get());
+}
+
+/*
+ * GetApIfaceNames
+ * Configures the chip in AP mode and ensures that the iface list is empty
+ * before creating the iface. Then, creates the iface and ensures that the
+ * iface name is returned via the list.
+ */
+TEST_F(WifiChipHidlApTest, GetApIfaceNames) {
+ configureChipForIfaceType(IfaceType::AP, true);
+
+ const auto& status_and_iface_names1 =
+ HIDL_INVOKE(wifi_chip_, getApIfaceNames);
+ EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names1.first.code);
+ EXPECT_EQ(0u, status_and_iface_names1.second.size());
+
+ sp<IWifiApIface> iface;
+ EXPECT_EQ(WifiStatusCode::SUCCESS, createApIface(&iface));
+ EXPECT_NE(nullptr, iface.get());
+
+ std::string iface_name = getIfaceName(iface);
+ const auto& status_and_iface_names2 =
+ HIDL_INVOKE(wifi_chip_, getApIfaceNames);
+ EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names2.first.code);
+ EXPECT_EQ(1u, status_and_iface_names2.second.size());
+ EXPECT_EQ(iface_name, status_and_iface_names2.second[0]);
+
+ EXPECT_EQ(WifiStatusCode::SUCCESS, removeApIface(iface_name));
+ const auto& status_and_iface_names3 =
+ HIDL_INVOKE(wifi_chip_, getApIfaceNames);
+ EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names3.first.code);
+ EXPECT_EQ(0u, status_and_iface_names3.second.size());
+}
+
+/*
+ * GetApIface
+ * Configures the chip in AP mode and creates an iface. Then, retrieves
+ * the iface object using the correct name and ensures that any other name
+ * doesn't retrieve an iface object.
+ */
+TEST_F(WifiChipHidlApTest, GetApIface) {
+ configureChipForIfaceType(IfaceType::AP, true);
+
+ sp<IWifiApIface> ap_iface;
+ EXPECT_EQ(WifiStatusCode::SUCCESS, createApIface(&ap_iface));
+ EXPECT_NE(nullptr, ap_iface.get());
+
+ std::string iface_name = getIfaceName(ap_iface);
+ const auto& status_and_iface1 =
+ HIDL_INVOKE(wifi_chip_, getApIface, iface_name);
+ EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface1.first.code);
+ EXPECT_NE(nullptr, status_and_iface1.second.get());
+
+ std::string invalid_name = iface_name + "0";
+ const auto& status_and_iface2 =
+ HIDL_INVOKE(wifi_chip_, getApIface, invalid_name);
+ EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, status_and_iface2.first.code);
+ EXPECT_EQ(nullptr, status_and_iface2.second.get());
+}
+
+/*
+ * RemoveApIface
+ * Configures the chip in AP mode and creates an iface. Then, removes
+ * the iface object using the correct name and ensures that any other name
+ * doesn't remove the iface.
+ */
+TEST_F(WifiChipHidlApTest, RemoveApIface) {
+ configureChipForIfaceType(IfaceType::AP, true);
+
+ sp<IWifiApIface> ap_iface;
+ EXPECT_EQ(WifiStatusCode::SUCCESS, createApIface(&ap_iface));
+ EXPECT_NE(nullptr, ap_iface.get());
+
+ std::string iface_name = getIfaceName(ap_iface);
+ std::string invalid_name = iface_name + "0";
+ EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, removeApIface(invalid_name));
+ EXPECT_EQ(WifiStatusCode::SUCCESS, removeApIface(iface_name));
+
+ // No such iface exists now. So, this should return failure.
+ EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, removeApIface(iface_name));
+}
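All of the helper methods above funnel through the HIDL_INVOKE macro from wifi_hidl_call_util.h, which collects the proxy's synchronous-callback outputs into a std::pair. For reference, the equivalent direct call spells the callback out by hand, in the same lambda style visible in the wifi_chip_unit_tests.cpp hunk later in this change; a sketch, not code from the change:

// createApIface without the HIDL_INVOKE helper (sketch).
WifiStatusCode code = WifiStatusCode::ERROR_UNKNOWN;
sp<IWifiApIface> created;
wifi_chip_->createApIface(
    [&](const WifiStatus& status, const sp<IWifiApIface>& iface) {
        code = status.code;
        created = iface;
    });
// code / created now hold what HIDL_INVOKE would have returned as a pair.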
diff --git a/wifi/1.0/vts/functional/wifi_chip_hidl_nan_test.cpp b/wifi/1.0/vts/functional/wifi_chip_hidl_nan_test.cpp
new file mode 100644
index 0000000..595f23a
--- /dev/null
+++ b/wifi/1.0/vts/functional/wifi_chip_hidl_nan_test.cpp
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+
+#include <android/hardware/wifi/1.0/IWifiChip.h>
+
+#include <VtsHalHidlTargetTestBase.h>
+
+#include "wifi_hidl_call_util.h"
+#include "wifi_hidl_test_utils.h"
+
+using ::android::sp;
+using ::android::hardware::wifi::V1_0::ChipModeId;
+using ::android::hardware::wifi::V1_0::IfaceType;
+using ::android::hardware::wifi::V1_0::IWifiChip;
+using ::android::hardware::wifi::V1_0::IWifiIface;
+using ::android::hardware::wifi::V1_0::IWifiNanIface;
+using ::android::hardware::wifi::V1_0::WifiStatus;
+using ::android::hardware::wifi::V1_0::WifiStatusCode;
+
+/**
+ * Fixture for IWifiChip tests that are conditioned on NAN support.
+ */
+class WifiChipHidlNanTest : public ::testing::VtsHalHidlTargetTestBase {
+ public:
+ virtual void SetUp() override {
+ wifi_chip_ = getWifiChip();
+ ASSERT_NE(nullptr, wifi_chip_.get());
+ }
+
+ virtual void TearDown() override { stopWifi(); }
+
+ protected:
+ // Helper function to configure the Chip in one of the supported modes.
+ // Most of the non-mode-configuration-related methods require chip
+ // to be first configured.
+ ChipModeId configureChipForIfaceType(IfaceType type, bool expectSuccess) {
+ ChipModeId mode_id;
+ EXPECT_EQ(expectSuccess,
+ configureChipToSupportIfaceType(wifi_chip_, type, &mode_id));
+ return mode_id;
+ }
+
+ std::string getIfaceName(const sp<IWifiIface>& iface) {
+ const auto& status_and_name = HIDL_INVOKE(iface, getName);
+ EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_name.first.code);
+ return status_and_name.second;
+ }
+
+ WifiStatusCode createNanIface(sp<IWifiNanIface>* nan_iface) {
+ const auto& status_and_iface = HIDL_INVOKE(wifi_chip_, createNanIface);
+ *nan_iface = status_and_iface.second;
+ return status_and_iface.first.code;
+ }
+
+ WifiStatusCode removeNanIface(const std::string& name) {
+ return HIDL_INVOKE(wifi_chip_, removeNanIface, name).code;
+ }
+
+ sp<IWifiChip> wifi_chip_;
+};
+
+/*
+ * CreateNanIface
+ * Configures the chip in NAN mode and ensures that at least 1 iface creation
+ * succeeds.
+ */
+TEST_F(WifiChipHidlNanTest, CreateNanIface) {
+ configureChipForIfaceType(IfaceType::NAN, true);
+
+ sp<IWifiNanIface> iface;
+ ASSERT_EQ(WifiStatusCode::SUCCESS, createNanIface(&iface));
+ EXPECT_NE(nullptr, iface.get());
+}
+
+/*
+ * GetNanIfaceNames
+ * Configures the chip in NAN mode and ensures that the iface list is empty
+ * before creating the iface. Then, creates the iface and ensures that the
+ * iface name is returned via the list.
+ */
+TEST_F(WifiChipHidlNanTest, GetNanIfaceNames) {
+ configureChipForIfaceType(IfaceType::NAN, true);
+
+ const auto& status_and_iface_names1 =
+ HIDL_INVOKE(wifi_chip_, getNanIfaceNames);
+ ASSERT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names1.first.code);
+ EXPECT_EQ(0u, status_and_iface_names1.second.size());
+
+ sp<IWifiNanIface> iface;
+ EXPECT_EQ(WifiStatusCode::SUCCESS, createNanIface(&iface));
+ EXPECT_NE(nullptr, iface.get());
+
+ std::string iface_name = getIfaceName(iface);
+ const auto& status_and_iface_names2 =
+ HIDL_INVOKE(wifi_chip_, getNanIfaceNames);
+ EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names2.first.code);
+ EXPECT_EQ(1u, status_and_iface_names2.second.size());
+ EXPECT_EQ(iface_name, status_and_iface_names2.second[0]);
+
+ EXPECT_EQ(WifiStatusCode::SUCCESS, removeNanIface(iface_name));
+ const auto& status_and_iface_names3 =
+ HIDL_INVOKE(wifi_chip_, getNanIfaceNames);
+ EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names3.first.code);
+ EXPECT_EQ(0u, status_and_iface_names3.second.size());
+}
+
+/*
+ * GetNanIface
+ * Configures the chip in NAN mode and creates an iface. Then, retrieves
+ * the iface object using the correct name and ensures that any other name
+ * doesn't retrieve an iface object.
+ */
+TEST_F(WifiChipHidlNanTest, GetNanIface) {
+ configureChipForIfaceType(IfaceType::NAN, true);
+
+ sp<IWifiNanIface> nan_iface;
+ EXPECT_EQ(WifiStatusCode::SUCCESS, createNanIface(&nan_iface));
+ EXPECT_NE(nullptr, nan_iface.get());
+
+ std::string iface_name = getIfaceName(nan_iface);
+ const auto& status_and_iface1 =
+ HIDL_INVOKE(wifi_chip_, getNanIface, iface_name);
+ EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface1.first.code);
+ EXPECT_NE(nullptr, status_and_iface1.second.get());
+
+ std::string invalid_name = iface_name + "0";
+ const auto& status_and_iface2 =
+ HIDL_INVOKE(wifi_chip_, getNanIface, invalid_name);
+ EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, status_and_iface2.first.code);
+ EXPECT_EQ(nullptr, status_and_iface2.second.get());
+}
+
+/*
+ * RemoveNanIface
+ * Configures the chip in NAN mode and creates an iface. Then, removes
+ * the iface object using the correct name and ensures that any other name
+ * doesn't remove the iface.
+ */
+TEST_F(WifiChipHidlNanTest, RemoveNanIface) {
+ configureChipForIfaceType(IfaceType::NAN, true);
+
+ sp<IWifiNanIface> nan_iface;
+ EXPECT_EQ(WifiStatusCode::SUCCESS, createNanIface(&nan_iface));
+ EXPECT_NE(nullptr, nan_iface.get());
+
+ std::string iface_name = getIfaceName(nan_iface);
+ std::string invalid_name = iface_name + "0";
+ EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, removeNanIface(invalid_name));
+
+ EXPECT_EQ(WifiStatusCode::SUCCESS, removeNanIface(iface_name));
+
+ // No such iface exists now. So, this should return failure.
+ EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, removeNanIface(iface_name));
+}
diff --git a/wifi/1.0/vts/functional/wifi_chip_hidl_test.cpp b/wifi/1.0/vts/functional/wifi_chip_hidl_test.cpp
index 1b7e821..2601b78 100644
--- a/wifi/1.0/vts/functional/wifi_chip_hidl_test.cpp
+++ b/wifi/1.0/vts/functional/wifi_chip_hidl_test.cpp
@@ -36,9 +36,7 @@
using ::android::hardware::wifi::V1_0::WifiStatus;
using ::android::hardware::wifi::V1_0::WifiStatusCode;
using ::android::hardware::wifi::V1_0::IWifiChip;
-using ::android::hardware::wifi::V1_0::IWifiApIface;
using ::android::hardware::wifi::V1_0::IWifiIface;
-using ::android::hardware::wifi::V1_0::IWifiNanIface;
using ::android::hardware::wifi::V1_0::IWifiP2pIface;
using ::android::hardware::wifi::V1_0::IWifiRttController;
using ::android::hardware::wifi::V1_0::IWifiStaIface;
@@ -64,7 +62,10 @@
} // namespace
/**
- * Fixture to use for all Wifi chip HIDL interface tests.
+ * Fixture for IWifiChip tests.
+ *
+ * Tests that require SoftAP or NAN support should go into WifiChipHidlApTest or
+ * WifiChipHidlNanTest respectively.
*/
class WifiChipHidlTest : public ::testing::VtsHalHidlTargetTestBase {
public:
@@ -114,26 +115,6 @@
return status_and_name.second;
}
- WifiStatusCode createApIface(sp<IWifiApIface>* ap_iface) {
- const auto& status_and_iface = HIDL_INVOKE(wifi_chip_, createApIface);
- *ap_iface = status_and_iface.second;
- return status_and_iface.first.code;
- }
-
- WifiStatusCode removeApIface(const std::string& name) {
- return HIDL_INVOKE(wifi_chip_, removeApIface, name).code;
- }
-
- WifiStatusCode createNanIface(sp<IWifiNanIface>* nan_iface) {
- const auto& status_and_iface = HIDL_INVOKE(wifi_chip_, createNanIface);
- *nan_iface = status_and_iface.second;
- return status_and_iface.first.code;
- }
-
- WifiStatusCode removeNanIface(const std::string& name) {
- return HIDL_INVOKE(wifi_chip_, removeNanIface, name).code;
- }
-
WifiStatusCode createP2pIface(sp<IWifiP2pIface>* p2p_iface) {
const auto& status_and_iface = HIDL_INVOKE(wifi_chip_, createP2pIface);
*p2p_iface = status_and_iface.second;
@@ -360,201 +341,6 @@
}
/*
- * CreateApIface
- * Configures the chip in AP mode and ensures that at least 1 iface creation
- * succeeds.
- */
-TEST_F(WifiChipHidlTest, CreateApIface) {
- if (!gEnv->isSoftApOn) return;
- configureChipForIfaceType(IfaceType::AP, true);
-
- sp<IWifiApIface> iface;
- EXPECT_EQ(WifiStatusCode::SUCCESS, createApIface(&iface));
- EXPECT_NE(nullptr, iface.get());
-}
-
-/*
- * GetApIfaceNames
- * Configures the chip in AP mode and ensures that the iface list is empty
- * before creating the iface. Then, create the iface and ensure that
- * iface name is returned via the list.
- */
-TEST_F(WifiChipHidlTest, GetApIfaceNames) {
- if (!gEnv->isSoftApOn) return;
- configureChipForIfaceType(IfaceType::AP, true);
-
- const auto& status_and_iface_names1 =
- HIDL_INVOKE(wifi_chip_, getApIfaceNames);
- EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names1.first.code);
- EXPECT_EQ(0u, status_and_iface_names1.second.size());
-
- sp<IWifiApIface> iface;
- EXPECT_EQ(WifiStatusCode::SUCCESS, createApIface(&iface));
- EXPECT_NE(nullptr, iface.get());
-
- std::string iface_name = getIfaceName(iface);
- const auto& status_and_iface_names2 =
- HIDL_INVOKE(wifi_chip_, getApIfaceNames);
- EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names2.first.code);
- EXPECT_EQ(1u, status_and_iface_names2.second.size());
- EXPECT_EQ(iface_name, status_and_iface_names2.second[0]);
-
- EXPECT_EQ(WifiStatusCode::SUCCESS, removeApIface(iface_name));
- const auto& status_and_iface_names3 =
- HIDL_INVOKE(wifi_chip_, getApIfaceNames);
- EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names3.first.code);
- EXPECT_EQ(0u, status_and_iface_names3.second.size());
-}
-
-/*
- * GetApIface
- * Configures the chip in AP mode and create an iface. Then, retrieve
- * the iface object using the correct name and ensure any other name
- * doesn't retrieve an iface object.
- */
-TEST_F(WifiChipHidlTest, GetApIface) {
- if (!gEnv->isSoftApOn) return;
- configureChipForIfaceType(IfaceType::AP, true);
-
- sp<IWifiApIface> ap_iface;
- EXPECT_EQ(WifiStatusCode::SUCCESS, createApIface(&ap_iface));
- EXPECT_NE(nullptr, ap_iface.get());
-
- std::string iface_name = getIfaceName(ap_iface);
- const auto& status_and_iface1 =
- HIDL_INVOKE(wifi_chip_, getApIface, iface_name);
- EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface1.first.code);
- EXPECT_NE(nullptr, status_and_iface1.second.get());
-
- std::string invalid_name = iface_name + "0";
- const auto& status_and_iface2 =
- HIDL_INVOKE(wifi_chip_, getApIface, invalid_name);
- EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, status_and_iface2.first.code);
- EXPECT_EQ(nullptr, status_and_iface2.second.get());
-}
-
-/*
- * RemoveApIface
- * Configures the chip in AP mode and create an iface. Then, remove
- * the iface object using the correct name and ensure any other name
- * doesn't remove the iface.
- */
-TEST_F(WifiChipHidlTest, RemoveApIface) {
- if (!gEnv->isSoftApOn) return;
- configureChipForIfaceType(IfaceType::AP, true);
-
- sp<IWifiApIface> ap_iface;
- EXPECT_EQ(WifiStatusCode::SUCCESS, createApIface(&ap_iface));
- EXPECT_NE(nullptr, ap_iface.get());
-
- std::string iface_name = getIfaceName(ap_iface);
- std::string invalid_name = iface_name + "0";
- EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, removeApIface(invalid_name));
- EXPECT_EQ(WifiStatusCode::SUCCESS, removeApIface(iface_name));
-
- // No such iface exists now. So, this should return failure.
- EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, removeApIface(iface_name));
-}
-
-/*
- * CreateNanIface
- * Configures the chip in NAN mode and ensures that at least 1 iface creation
- * succeeds.
- */
-TEST_F(WifiChipHidlTest, CreateNanIface) {
- if (!gEnv->isNanOn) return;
- configureChipForIfaceType(IfaceType::NAN, gEnv->isNanOn);
-
- sp<IWifiNanIface> iface;
- ASSERT_EQ(WifiStatusCode::SUCCESS, createNanIface(&iface));
- EXPECT_NE(nullptr, iface.get());
-}
-
-/*
- * GetNanIfaceNames
- * Configures the chip in NAN mode and ensures that the iface list is empty
- * before creating the iface. Then, create the iface and ensure that
- * iface name is returned via the list.
- */
-TEST_F(WifiChipHidlTest, GetNanIfaceNames) {
- if (!gEnv->isNanOn) return;
- configureChipForIfaceType(IfaceType::NAN, gEnv->isNanOn);
-
- const auto& status_and_iface_names1 =
- HIDL_INVOKE(wifi_chip_, getNanIfaceNames);
- ASSERT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names1.first.code);
- EXPECT_EQ(0u, status_and_iface_names1.second.size());
-
- sp<IWifiNanIface> iface;
- EXPECT_EQ(WifiStatusCode::SUCCESS, createNanIface(&iface));
- EXPECT_NE(nullptr, iface.get());
-
- std::string iface_name = getIfaceName(iface);
- const auto& status_and_iface_names2 =
- HIDL_INVOKE(wifi_chip_, getNanIfaceNames);
- EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names2.first.code);
- EXPECT_EQ(1u, status_and_iface_names2.second.size());
- EXPECT_EQ(iface_name, status_and_iface_names2.second[0]);
-
- EXPECT_EQ(WifiStatusCode::SUCCESS, removeNanIface(iface_name));
- const auto& status_and_iface_names3 =
- HIDL_INVOKE(wifi_chip_, getNanIfaceNames);
- EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface_names3.first.code);
- EXPECT_EQ(0u, status_and_iface_names3.second.size());
-}
-
-/*
- * GetNanIface
- * Configures the chip in NAN mode and create an iface. Then, retrieve
- * the iface object using the correct name and ensure any other name
- * doesn't retrieve an iface object.
- */
-TEST_F(WifiChipHidlTest, GetNanIface) {
- if (!gEnv->isNanOn) return;
- configureChipForIfaceType(IfaceType::NAN, gEnv->isNanOn);
-
- sp<IWifiNanIface> nan_iface;
- EXPECT_EQ(WifiStatusCode::SUCCESS, createNanIface(&nan_iface));
- EXPECT_NE(nullptr, nan_iface.get());
-
- std::string iface_name = getIfaceName(nan_iface);
- const auto& status_and_iface1 =
- HIDL_INVOKE(wifi_chip_, getNanIface, iface_name);
- EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_iface1.first.code);
- EXPECT_NE(nullptr, status_and_iface1.second.get());
-
- std::string invalid_name = iface_name + "0";
- const auto& status_and_iface2 =
- HIDL_INVOKE(wifi_chip_, getNanIface, invalid_name);
- EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, status_and_iface2.first.code);
- EXPECT_EQ(nullptr, status_and_iface2.second.get());
-}
-
-/*
- * RemoveNanIface
- * Configures the chip in NAN mode and create an iface. Then, remove
- * the iface object using the correct name and ensure any other name
- * doesn't remove the iface.
- */
-TEST_F(WifiChipHidlTest, RemoveNanIface) {
- if (!gEnv->isNanOn) return;
- configureChipForIfaceType(IfaceType::NAN, gEnv->isNanOn);
-
- sp<IWifiNanIface> nan_iface;
- EXPECT_EQ(WifiStatusCode::SUCCESS, createNanIface(&nan_iface));
- EXPECT_NE(nullptr, nan_iface.get());
-
- std::string iface_name = getIfaceName(nan_iface);
- std::string invalid_name = iface_name + "0";
- EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, removeNanIface(invalid_name));
-
- EXPECT_EQ(WifiStatusCode::SUCCESS, removeNanIface(iface_name));
-
- // No such iface exists now. So, this should return failure.
- EXPECT_EQ(WifiStatusCode::ERROR_INVALID_ARGS, removeNanIface(iface_name));
-}
-
-/*
* CreateP2pIface
* Configures the chip in P2P mode and ensures that at least 1 iface creation
* succeeds.
diff --git a/wifi/1.0/vts/functional/wifi_hidl_test_utils.h b/wifi/1.0/vts/functional/wifi_hidl_test_utils.h
index d430ce0..7dacaf1 100644
--- a/wifi/1.0/vts/functional/wifi_hidl_test_utils.h
+++ b/wifi/1.0/vts/functional/wifi_hidl_test_utils.h
@@ -54,48 +54,4 @@
stopWifi();
sleep(5);
}
-
- public:
- // Whether NaN feature is supported on the device.
- bool isNanOn = false;
- // Whether SoftAp feature is supported on the device.
- bool isSoftApOn = false;
-
- void usage(char* me, char* arg) {
- fprintf(stderr,
- "unrecognized option: %s\n\n"
- "usage: %s <gtest options> <test options>\n\n"
- "test options are:\n\n"
- "-N, --nan_on: Whether NAN feature is supported\n"
- "-S, --softap_on: Whether SOFTAP feature is supported\n",
- arg, me);
- }
-
- int initFromOptions(int argc, char** argv) {
- static struct option options[] = {{"nan_on", no_argument, 0, 'N'},
- {"softap_on", no_argument, 0, 'S'},
- {0, 0, 0, 0}};
-
- int c;
- while ((c = getopt_long(argc, argv, "NS", options, NULL)) >= 0) {
- switch (c) {
- case 'N':
- isNanOn = true;
- break;
- case 'S':
- isSoftApOn = true;
- break;
- default:
- usage(argv[0], argv[optind]);
- return 2;
- }
- }
-
- if (optind < argc) {
- usage(argv[0], argv[optind]);
- return 2;
- }
-
- return 0;
- }
};
diff --git a/wifi/1.1/vts/functional/VtsHalWifiV1_1TargetTest.cpp b/wifi/1.1/vts/functional/VtsHalWifiV1_1TargetTest.cpp
index a0f97f8..673fed3 100644
--- a/wifi/1.1/vts/functional/VtsHalWifiV1_1TargetTest.cpp
+++ b/wifi/1.1/vts/functional/VtsHalWifiV1_1TargetTest.cpp
@@ -41,10 +41,7 @@
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
gEnv->init(&argc, argv);
- int status = gEnv->initFromOptions(argc, argv);
- if (status == 0) {
- int status = RUN_ALL_TESTS();
- LOG(INFO) << "Test result = " << status;
- }
+ int status = RUN_ALL_TESTS();
+ LOG(INFO) << "Test result = " << status;
return status;
}
diff --git a/wifi/1.3/default/tests/wifi_ap_iface_unit_tests.cpp b/wifi/1.3/default/tests/wifi_ap_iface_unit_tests.cpp
deleted file mode 100644
index 680f534..0000000
--- a/wifi/1.3/default/tests/wifi_ap_iface_unit_tests.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2019, The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <android-base/logging.h>
-#include <android-base/macros.h>
-#include <cutils/properties.h>
-#include <gmock/gmock.h>
-
-#undef NAN // This is weird, NAN is defined in bionic/libc/include/math.h:38
-#include "wifi_ap_iface.h"
-
-#include "mock_interface_tool.h"
-#include "mock_wifi_feature_flags.h"
-#include "mock_wifi_iface_util.h"
-#include "mock_wifi_legacy_hal.h"
-
-using testing::NiceMock;
-using testing::Return;
-using testing::Test;
-
-namespace {
-constexpr char kIfaceName[] = "mockWlan0";
-} // namespace
-
-namespace android {
-namespace hardware {
-namespace wifi {
-namespace V1_3 {
-namespace implementation {
-
-class WifiApIfaceTest : public Test {
- protected:
- std::shared_ptr<NiceMock<wifi_system::MockInterfaceTool>> iface_tool_{
- new NiceMock<wifi_system::MockInterfaceTool>};
- std::shared_ptr<NiceMock<legacy_hal::MockWifiLegacyHal>> legacy_hal_{
- new NiceMock<legacy_hal::MockWifiLegacyHal>(iface_tool_)};
- std::shared_ptr<NiceMock<iface_util::MockWifiIfaceUtil>> iface_util_{
- new NiceMock<iface_util::MockWifiIfaceUtil>(iface_tool_)};
- std::shared_ptr<NiceMock<feature_flags::MockWifiFeatureFlags>>
- feature_flags_{new NiceMock<feature_flags::MockWifiFeatureFlags>};
-};
-
-TEST_F(WifiApIfaceTest, SetRandomMacAddressIfFeatureEnabled) {
- EXPECT_CALL(*feature_flags_, isApMacRandomizationDisabled())
- .WillOnce(testing::Return(false));
- EXPECT_CALL(*iface_util_, getOrCreateRandomMacAddress())
- .WillOnce(testing::Return(std::array<uint8_t, 6>{0, 0, 0, 0, 0, 0}));
- EXPECT_CALL(*iface_util_, setMacAddress(testing::_, testing::_))
- .WillOnce(testing::Return(true));
- sp<WifiApIface> ap_iface =
- new WifiApIface(kIfaceName, legacy_hal_, iface_util_, feature_flags_);
-}
-
-TEST_F(WifiApIfaceTest, DontSetRandomMacAddressIfFeatureDisabled) {
- EXPECT_CALL(*feature_flags_, isApMacRandomizationDisabled())
- .WillOnce(testing::Return(true));
- EXPECT_CALL(*iface_util_, getOrCreateRandomMacAddress()).Times(0);
- EXPECT_CALL(*iface_util_, setMacAddress(testing::_, testing::_)).Times(0);
- sp<WifiApIface> ap_iface =
- new WifiApIface(kIfaceName, legacy_hal_, iface_util_, feature_flags_);
-}
-} // namespace implementation
-} // namespace V1_3
-} // namespace wifi
-} // namespace hardware
-} // namespace android
diff --git a/wifi/1.3/vts/functional/wifi_sta_iface_hidl_test.cpp b/wifi/1.3/vts/functional/wifi_sta_iface_hidl_test.cpp
index 71e90ac..d382f30 100644
--- a/wifi/1.3/vts/functional/wifi_sta_iface_hidl_test.cpp
+++ b/wifi/1.3/vts/functional/wifi_sta_iface_hidl_test.cpp
@@ -27,6 +27,8 @@
#include "wifi_hidl_test_utils.h"
using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::wifi::V1_0::WifiStatus;
using ::android::hardware::wifi::V1_0::WifiStatusCode;
using ::android::hardware::wifi::V1_3::IWifiStaIface;
@@ -59,14 +61,11 @@
* and return a success status code.
*/
TEST_F(WifiStaIfaceHidlTest, GetFactoryMacAddress) {
- const auto& status_and_mac =
+ std::pair<WifiStatus, hidl_array<uint8_t, 6> > status_and_mac =
HIDL_INVOKE(wifi_sta_iface_, getFactoryMacAddress);
EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_mac.first.code);
- const int num_elements = sizeof(status_and_mac.second) / sizeof(uint8_t);
- EXPECT_EQ(6, num_elements);
- for (int i = 0; i < num_elements; i++) {
- EXPECT_NE(0, status_and_mac.second[i]);
- }
+ hidl_array<uint8_t, 6> all_zero{};
+ EXPECT_NE(all_zero, status_and_mac.second);
}
/*
diff --git a/wifi/1.4/Android.bp b/wifi/1.4/Android.bp
new file mode 100644
index 0000000..aba8b44
--- /dev/null
+++ b/wifi/1.4/Android.bp
@@ -0,0 +1,21 @@
+// This file is autogenerated by hidl-gen -Landroidbp.
+
+hidl_interface {
+ name: "android.hardware.wifi@1.4",
+ root: "android.hardware",
+ vndk: {
+ enabled: true,
+ },
+ srcs: [
+ "IWifi.hal",
+ "IWifiApIface.hal",
+ ],
+ interfaces: [
+ "android.hardware.wifi@1.0",
+ "android.hardware.wifi@1.1",
+ "android.hardware.wifi@1.2",
+ "android.hardware.wifi@1.3",
+ "android.hidl.base@1.0",
+ ],
+ gen_java: true,
+}
diff --git a/wifi/1.4/IWifi.hal b/wifi/1.4/IWifi.hal
new file mode 100644
index 0000000..f4bc618
--- /dev/null
+++ b/wifi/1.4/IWifi.hal
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.wifi@1.4;
+
+import @1.3::IWifi;
+
+/**
+ * This is the root of the HAL module and is the interface returned when
+ * loading an implementation of the Wi-Fi HAL. There must be at most one
+ * module loaded in the system.
+ * IWifi.getChip() must return @1.3::IWifiChip
+ */
+interface IWifi extends @1.3::IWifi {
+};
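The 1.4 IWifi adds nothing beyond the version bump, so clients obtain it through the generated getService() factory and drive it exactly as before. A minimal client-side sketch, assuming the default service instance (not part of the HAL definition above):

// Client-side sketch: fetch and start the 1.4 root interface.
using ::android::hardware::wifi::V1_0::WifiStatus;
using ::android::hardware::wifi::V1_0::WifiStatusCode;
using ::android::hardware::wifi::V1_4::IWifi;

android::sp<IWifi> wifi = IWifi::getService();  // "default" instance
if (wifi != nullptr) {
    wifi->start([](const WifiStatus& status) {
        // status.code == WifiStatusCode::SUCCESS once the HAL is up.
    });
}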
diff --git a/wifi/1.4/IWifiApIface.hal b/wifi/1.4/IWifiApIface.hal
new file mode 100644
index 0000000..af88afb
--- /dev/null
+++ b/wifi/1.4/IWifiApIface.hal
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.wifi@1.4;
+
+import @1.0::IWifiApIface;
+import @1.0::MacAddress;
+import @1.0::WifiStatus;
+
+/**
+ * Represents a network interface in AP mode.
+ *
+ * This can be obtained through @1.0::IWifiChip.getApIface() and casting
+ * IWifiApIface up to 1.4.
+ */
+interface IWifiApIface extends @1.0::IWifiApIface {
+ /**
+ * Changes the MAC address of the interface to the given MAC address.
+ *
+ * @param mac MAC address to change to.
+ * @return status WifiStatus of the operation.
+ * Possible status codes:
+ * |WifiStatusCode.SUCCESS|,
+ * |WifiStatusCode.ERROR_WIFI_IFACE_INVALID|,
+ * |WifiStatusCode.ERROR_UNKNOWN|
+ */
+ setMacAddress(MacAddress mac) generates (WifiStatus status);
+
+ /**
+ * Gets the factory MAC address of the interface.
+ *
+ * @return status WifiStatus of the operation
+ * Possible status codes:
+ * |WifiStatusCode.SUCCESS|,
+ * |WifiStatusCode.ERROR_WIFI_IFACE_INVALID|,
+ * |WifiStatusCode.ERROR_UNKNOWN|
+ * @return mac factory MAC address of the interface
+ */
+ getFactoryMacAddress() generates (WifiStatus status, MacAddress mac);
+};
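As the interface comment notes, an AP iface obtained from @1.0::IWifiChip.getApIface() has to be up-cast before the new methods are reachable. A client-side sketch of that pattern using the generated castFrom() helper (standard HIDL; iface_1_0 is an assumed sp<V1_0::IWifiApIface> handle):

// Upgrade a @1.0 handle and exercise the new 1.4 methods (sketch).
using ::android::hardware::hidl_array;
using ::android::hardware::wifi::V1_0::WifiStatus;
namespace V1_4 = ::android::hardware::wifi::V1_4;

android::sp<V1_4::IWifiApIface> iface = V1_4::IWifiApIface::castFrom(iface_1_0);
if (iface != nullptr) {
    hidl_array<uint8_t, 6> mac{};  // fill with the desired MAC
    iface->setMacAddress(mac, [](const WifiStatus& status) {
        // expect WifiStatusCode::SUCCESS
    });
    iface->getFactoryMacAddress(
        [](const WifiStatus& status, const hidl_array<uint8_t, 6>& factory_mac) {
            // factory_mac is valid only when status.code == WifiStatusCode::SUCCESS
        });
}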
diff --git a/wifi/1.3/default/Android.mk b/wifi/1.4/default/Android.mk
similarity index 92%
rename from wifi/1.3/default/Android.mk
rename to wifi/1.4/default/Android.mk
index 29f1c42..ab76ff6 100644
--- a/wifi/1.3/default/Android.mk
+++ b/wifi/1.4/default/Android.mk
@@ -67,7 +67,8 @@
android.hardware.wifi@1.0 \
android.hardware.wifi@1.1 \
android.hardware.wifi@1.2 \
- android.hardware.wifi@1.3
+ android.hardware.wifi@1.3 \
+ android.hardware.wifi@1.4
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
include $(BUILD_STATIC_LIBRARY)
@@ -76,6 +77,7 @@
###
include $(CLEAR_VARS)
LOCAL_MODULE := android.hardware.wifi@1.0-service
+LOCAL_VINTF_FRAGMENTS := android.hardware.wifi@1.0-service.xml
LOCAL_MODULE_RELATIVE_PATH := hw
LOCAL_PROPRIETARY_MODULE := true
LOCAL_CPPFLAGS := -Wall -Werror -Wextra
@@ -93,7 +95,8 @@
android.hardware.wifi@1.0 \
android.hardware.wifi@1.1 \
android.hardware.wifi@1.2 \
- android.hardware.wifi@1.3
+ android.hardware.wifi@1.3 \
+ android.hardware.wifi@1.4
LOCAL_STATIC_LIBRARIES := \
android.hardware.wifi@1.0-service-lib
LOCAL_INIT_RC := android.hardware.wifi@1.0-service.rc
@@ -104,6 +107,7 @@
###
include $(CLEAR_VARS)
LOCAL_MODULE := android.hardware.wifi@1.0-service-lazy
+LOCAL_VINTF_FRAGMENTS := android.hardware.wifi@1.0-service.xml
LOCAL_OVERRIDES_MODULES := android.hardware.wifi@1.0-service
LOCAL_CFLAGS := -DLAZY_SERVICE
LOCAL_MODULE_RELATIVE_PATH := hw
@@ -123,7 +127,8 @@
android.hardware.wifi@1.0 \
android.hardware.wifi@1.1 \
android.hardware.wifi@1.2 \
- android.hardware.wifi@1.3
+ android.hardware.wifi@1.3 \
+ android.hardware.wifi@1.4
LOCAL_STATIC_LIBRARIES := \
android.hardware.wifi@1.0-service-lib
LOCAL_INIT_RC := android.hardware.wifi@1.0-service-lazy.rc
@@ -145,7 +150,6 @@
tests/mock_wifi_legacy_hal.cpp \
tests/mock_wifi_mode_controller.cpp \
tests/ringbuffer_unit_tests.cpp \
- tests/wifi_ap_iface_unit_tests.cpp \
tests/wifi_nan_iface_unit_tests.cpp \
tests/wifi_chip_unit_tests.cpp \
tests/wifi_iface_util_unit_tests.cpp
@@ -165,5 +169,6 @@
android.hardware.wifi@1.0 \
android.hardware.wifi@1.1 \
android.hardware.wifi@1.2 \
- android.hardware.wifi@1.3
+ android.hardware.wifi@1.3 \
+ android.hardware.wifi@1.4
include $(BUILD_NATIVE_TEST)
diff --git a/wifi/1.3/default/OWNERS b/wifi/1.4/default/OWNERS
similarity index 100%
rename from wifi/1.3/default/OWNERS
rename to wifi/1.4/default/OWNERS
diff --git a/wifi/1.3/default/THREADING.README b/wifi/1.4/default/THREADING.README
similarity index 100%
rename from wifi/1.3/default/THREADING.README
rename to wifi/1.4/default/THREADING.README
diff --git a/wifi/1.3/default/android.hardware.wifi@1.0-service-lazy.rc b/wifi/1.4/default/android.hardware.wifi@1.0-service-lazy.rc
similarity index 100%
rename from wifi/1.3/default/android.hardware.wifi@1.0-service-lazy.rc
rename to wifi/1.4/default/android.hardware.wifi@1.0-service-lazy.rc
diff --git a/wifi/1.3/default/android.hardware.wifi@1.0-service.rc b/wifi/1.4/default/android.hardware.wifi@1.0-service.rc
similarity index 100%
rename from wifi/1.3/default/android.hardware.wifi@1.0-service.rc
rename to wifi/1.4/default/android.hardware.wifi@1.0-service.rc
diff --git a/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.xml b/wifi/1.4/default/android.hardware.wifi@1.0-service.xml
similarity index 64%
copy from vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.xml
copy to wifi/1.4/default/android.hardware.wifi@1.0-service.xml
index 172aa21..b5d25cd 100644
--- a/vibrator/1.3/example/android.hardware.vibrator@1.3-service.example.xml
+++ b/wifi/1.4/default/android.hardware.wifi@1.0-service.xml
@@ -1,10 +1,10 @@
<manifest version="1.0" type="device">
<hal format="hidl">
- <name>android.hardware.vibrator</name>
+ <name>android.hardware.wifi</name>
<transport>hwbinder</transport>
- <version>1.3</version>
+ <version>1.4</version>
<interface>
- <name>IVibrator</name>
+ <name>IWifi</name>
<instance>default</instance>
</interface>
</hal>
diff --git a/wifi/1.3/default/hidl_callback_util.h b/wifi/1.4/default/hidl_callback_util.h
similarity index 98%
rename from wifi/1.3/default/hidl_callback_util.h
rename to wifi/1.4/default/hidl_callback_util.h
index a44af79..fc601b8 100644
--- a/wifi/1.3/default/hidl_callback_util.h
+++ b/wifi/1.4/default/hidl_callback_util.h
@@ -52,7 +52,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace hidl_callback_util {
template <typename CallbackType>
@@ -117,7 +117,7 @@
} // namespace hidl_callback_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/hidl_return_util.h b/wifi/1.4/default/hidl_return_util.h
similarity index 98%
rename from wifi/1.3/default/hidl_return_util.h
rename to wifi/1.4/default/hidl_return_util.h
index 9707444..99c7092 100644
--- a/wifi/1.3/default/hidl_return_util.h
+++ b/wifi/1.4/default/hidl_return_util.h
@@ -23,7 +23,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace hidl_return_util {
using namespace android::hardware::wifi::V1_0;
@@ -113,7 +113,7 @@
} // namespace hidl_return_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/hidl_struct_util.cpp b/wifi/1.4/default/hidl_struct_util.cpp
similarity index 99%
rename from wifi/1.3/default/hidl_struct_util.cpp
rename to wifi/1.4/default/hidl_struct_util.cpp
index 2e4db70..47713cd 100644
--- a/wifi/1.3/default/hidl_struct_util.cpp
+++ b/wifi/1.4/default/hidl_struct_util.cpp
@@ -22,7 +22,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace hidl_struct_util {
@@ -302,11 +302,11 @@
}
legacy_hal::wifi_latency_mode convertHidlLatencyModeToLegacy(
- IWifiChip::LatencyMode hidl_latency_mode) {
+ V1_3::IWifiChip::LatencyMode hidl_latency_mode) {
switch (hidl_latency_mode) {
- case IWifiChip::LatencyMode::NORMAL:
+ case V1_3::IWifiChip::LatencyMode::NORMAL:
return legacy_hal::WIFI_LATENCY_MODE_NORMAL;
- case IWifiChip::LatencyMode::LOW:
+ case V1_3::IWifiChip::LatencyMode::LOW:
return legacy_hal::WIFI_LATENCY_MODE_LOW;
}
CHECK(false);
@@ -2683,7 +2683,7 @@
}
} // namespace hidl_struct_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/hidl_struct_util.h b/wifi/1.4/default/hidl_struct_util.h
similarity index 99%
rename from wifi/1.3/default/hidl_struct_util.h
rename to wifi/1.4/default/hidl_struct_util.h
index 3eefd95..c9f1c26 100644
--- a/wifi/1.3/default/hidl_struct_util.h
+++ b/wifi/1.4/default/hidl_struct_util.h
@@ -37,7 +37,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace hidl_struct_util {
using namespace android::hardware::wifi::V1_0;
@@ -188,7 +188,7 @@
std::vector<RttResult>* hidl_results);
} // namespace hidl_struct_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/hidl_sync_util.cpp b/wifi/1.4/default/hidl_sync_util.cpp
similarity index 96%
rename from wifi/1.3/default/hidl_sync_util.cpp
rename to wifi/1.4/default/hidl_sync_util.cpp
index 160727f..593a3bc 100644
--- a/wifi/1.3/default/hidl_sync_util.cpp
+++ b/wifi/1.4/default/hidl_sync_util.cpp
@@ -23,7 +23,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace hidl_sync_util {
@@ -33,7 +33,7 @@
} // namespace hidl_sync_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/hidl_sync_util.h b/wifi/1.4/default/hidl_sync_util.h
similarity index 96%
rename from wifi/1.3/default/hidl_sync_util.h
rename to wifi/1.4/default/hidl_sync_util.h
index ebfb051..0244421 100644
--- a/wifi/1.3/default/hidl_sync_util.h
+++ b/wifi/1.4/default/hidl_sync_util.h
@@ -24,13 +24,13 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace hidl_sync_util {
std::unique_lock<std::recursive_mutex> acquireGlobalLock();
} // namespace hidl_sync_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/ringbuffer.cpp b/wifi/1.4/default/ringbuffer.cpp
similarity index 97%
rename from wifi/1.3/default/ringbuffer.cpp
rename to wifi/1.4/default/ringbuffer.cpp
index 1294c52..0fe8ef4 100644
--- a/wifi/1.3/default/ringbuffer.cpp
+++ b/wifi/1.4/default/ringbuffer.cpp
@@ -21,7 +21,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
Ringbuffer::Ringbuffer(size_t maxSize) : size_(0), maxSize_(maxSize) {}
@@ -48,7 +48,7 @@
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/ringbuffer.h b/wifi/1.4/default/ringbuffer.h
similarity index 97%
rename from wifi/1.3/default/ringbuffer.h
rename to wifi/1.4/default/ringbuffer.h
index d9f8df6..ddce648 100644
--- a/wifi/1.3/default/ringbuffer.h
+++ b/wifi/1.4/default/ringbuffer.h
@@ -23,7 +23,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
/**
@@ -45,7 +45,7 @@
};
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/service.cpp b/wifi/1.4/default/service.cpp
similarity index 85%
rename from wifi/1.3/default/service.cpp
rename to wifi/1.4/default/service.cpp
index 0b41d28..3f7f609 100644
--- a/wifi/1.3/default/service.cpp
+++ b/wifi/1.4/default/service.cpp
@@ -28,11 +28,11 @@
using android::hardware::configureRpcThreadpool;
using android::hardware::joinRpcThreadpool;
using android::hardware::LazyServiceRegistrar;
-using android::hardware::wifi::V1_3::implementation::feature_flags::
+using android::hardware::wifi::V1_4::implementation::feature_flags::
WifiFeatureFlags;
-using android::hardware::wifi::V1_3::implementation::iface_util::WifiIfaceUtil;
-using android::hardware::wifi::V1_3::implementation::legacy_hal::WifiLegacyHal;
-using android::hardware::wifi::V1_3::implementation::mode_controller::
+using android::hardware::wifi::V1_4::implementation::iface_util::WifiIfaceUtil;
+using android::hardware::wifi::V1_4::implementation::legacy_hal::WifiLegacyHal;
+using android::hardware::wifi::V1_4::implementation::mode_controller::
WifiModeController;
#ifdef LAZY_SERVICE
@@ -51,8 +51,8 @@
const auto iface_tool =
std::make_shared<android::wifi_system::InterfaceTool>();
// Setup hwbinder service
- android::sp<android::hardware::wifi::V1_3::IWifi> service =
- new android::hardware::wifi::V1_3::implementation::Wifi(
+ android::sp<android::hardware::wifi::V1_4::IWifi> service =
+ new android::hardware::wifi::V1_4::implementation::Wifi(
iface_tool, std::make_shared<WifiLegacyHal>(iface_tool),
std::make_shared<WifiModeController>(),
std::make_shared<WifiIfaceUtil>(iface_tool),
diff --git a/wifi/1.3/default/tests/hidl_struct_util_unit_tests.cpp b/wifi/1.4/default/tests/hidl_struct_util_unit_tests.cpp
similarity index 98%
rename from wifi/1.3/default/tests/hidl_struct_util_unit_tests.cpp
rename to wifi/1.4/default/tests/hidl_struct_util_unit_tests.cpp
index dbf7bd6..14a1504 100644
--- a/wifi/1.3/default/tests/hidl_struct_util_unit_tests.cpp
+++ b/wifi/1.4/default/tests/hidl_struct_util_unit_tests.cpp
@@ -34,7 +34,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using namespace android::hardware::wifi::V1_0;
using ::android::hardware::wifi::V1_0::WifiChannelWidthInMhz;
@@ -170,12 +170,14 @@
legacy_hal::wifi_channel_stat channel_stat1 = {
.channel = {legacy_hal::WIFI_CHAN_WIDTH_20, 2437, 2437, 0},
+ .on_time = 0x1111,
.cca_busy_time = 0x55,
- .on_time = 0x1111};
+ };
legacy_hal::wifi_channel_stat channel_stat2 = {
.channel = {legacy_hal::WIFI_CHAN_WIDTH_20, 5180, 5180, 0},
+ .on_time = 0x2222,
.cca_busy_time = 0x66,
- .on_time = 0x2222};
+ };
radio.channel_stats.push_back(channel_stat1);
radio.channel_stats.push_back(channel_stat2);
}
@@ -275,9 +277,9 @@
uint32_t hidle_caps;
uint32_t legacy_feature_set =
- WIFI_FEATURE_D2D_RTT | WIFI_FEATURE_SET_LATENCY_MODE;
+ WIFI_FEATURE_D2D_RTT | WIFI_FEATURE_SET_LATENCY_MODE;
uint32_t legacy_logger_feature_set =
- legacy_hal::WIFI_LOGGER_DRIVER_DUMP_SUPPORTED;
+ legacy_hal::WIFI_LOGGER_DRIVER_DUMP_SUPPORTED;
ASSERT_TRUE(hidl_struct_util::convertLegacyFeaturesToHidlChipCapabilities(
legacy_feature_set, legacy_logger_feature_set, &hidle_caps));
@@ -290,7 +292,7 @@
hidle_caps);
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/main.cpp b/wifi/1.4/default/tests/main.cpp
similarity index 100%
rename from wifi/1.3/default/tests/main.cpp
rename to wifi/1.4/default/tests/main.cpp
diff --git a/wifi/1.3/default/tests/mock_interface_tool.cpp b/wifi/1.4/default/tests/mock_interface_tool.cpp
similarity index 100%
rename from wifi/1.3/default/tests/mock_interface_tool.cpp
rename to wifi/1.4/default/tests/mock_interface_tool.cpp
diff --git a/wifi/1.3/default/tests/mock_interface_tool.h b/wifi/1.4/default/tests/mock_interface_tool.h
similarity index 100%
rename from wifi/1.3/default/tests/mock_interface_tool.h
rename to wifi/1.4/default/tests/mock_interface_tool.h
diff --git a/wifi/1.3/default/tests/mock_wifi_feature_flags.cpp b/wifi/1.4/default/tests/mock_wifi_feature_flags.cpp
similarity index 96%
rename from wifi/1.3/default/tests/mock_wifi_feature_flags.cpp
rename to wifi/1.4/default/tests/mock_wifi_feature_flags.cpp
index a393fdc..b1fa432 100644
--- a/wifi/1.3/default/tests/mock_wifi_feature_flags.cpp
+++ b/wifi/1.4/default/tests/mock_wifi_feature_flags.cpp
@@ -21,7 +21,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace feature_flags {
@@ -29,7 +29,7 @@
} // namespace feature_flags
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/mock_wifi_feature_flags.h b/wifi/1.4/default/tests/mock_wifi_feature_flags.h
similarity index 96%
rename from wifi/1.3/default/tests/mock_wifi_feature_flags.h
rename to wifi/1.4/default/tests/mock_wifi_feature_flags.h
index ee12b54..72d2304 100644
--- a/wifi/1.3/default/tests/mock_wifi_feature_flags.h
+++ b/wifi/1.4/default/tests/mock_wifi_feature_flags.h
@@ -25,7 +25,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace feature_flags {
@@ -39,7 +39,7 @@
} // namespace feature_flags
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/mock_wifi_iface_util.cpp b/wifi/1.4/default/tests/mock_wifi_iface_util.cpp
similarity index 96%
rename from wifi/1.3/default/tests/mock_wifi_iface_util.cpp
rename to wifi/1.4/default/tests/mock_wifi_iface_util.cpp
index 3d877c0..0968569 100644
--- a/wifi/1.3/default/tests/mock_wifi_iface_util.cpp
+++ b/wifi/1.4/default/tests/mock_wifi_iface_util.cpp
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace iface_util {
@@ -33,7 +33,7 @@
: WifiIfaceUtil(iface_tool) {}
} // namespace iface_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/mock_wifi_iface_util.h b/wifi/1.4/default/tests/mock_wifi_iface_util.h
similarity index 97%
rename from wifi/1.3/default/tests/mock_wifi_iface_util.h
rename to wifi/1.4/default/tests/mock_wifi_iface_util.h
index 8ec93eb..6cc81e4 100644
--- a/wifi/1.3/default/tests/mock_wifi_iface_util.h
+++ b/wifi/1.4/default/tests/mock_wifi_iface_util.h
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace iface_util {
@@ -43,7 +43,7 @@
};
} // namespace iface_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/mock_wifi_legacy_hal.cpp b/wifi/1.4/default/tests/mock_wifi_legacy_hal.cpp
similarity index 96%
rename from wifi/1.3/default/tests/mock_wifi_legacy_hal.cpp
rename to wifi/1.4/default/tests/mock_wifi_legacy_hal.cpp
index 0a202c4..8d65c59 100644
--- a/wifi/1.3/default/tests/mock_wifi_legacy_hal.cpp
+++ b/wifi/1.4/default/tests/mock_wifi_legacy_hal.cpp
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace legacy_hal {
@@ -33,7 +33,7 @@
: WifiLegacyHal(iface_tool) {}
} // namespace legacy_hal
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/mock_wifi_legacy_hal.h b/wifi/1.4/default/tests/mock_wifi_legacy_hal.h
similarity index 92%
rename from wifi/1.3/default/tests/mock_wifi_legacy_hal.h
rename to wifi/1.4/default/tests/mock_wifi_legacy_hal.h
index 81cb1de..6942c1e 100644
--- a/wifi/1.3/default/tests/mock_wifi_legacy_hal.h
+++ b/wifi/1.4/default/tests/mock_wifi_legacy_hal.h
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace legacy_hal {
@@ -41,9 +41,9 @@
wifi_error(const std::string&,
const on_radio_mode_change_callback&));
MOCK_METHOD1(getFirmwareVersion, std::pair<wifi_error, std::string>(
- const std::string& iface_name));
+ const std::string& iface_name));
MOCK_METHOD1(getDriverVersion, std::pair<wifi_error, std::string>(
- const std::string& iface_name));
+ const std::string& iface_name));
MOCK_METHOD2(selectTxPowerScenario,
wifi_error(const std::string& iface_name,
@@ -60,7 +60,7 @@
};
} // namespace legacy_hal
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/mock_wifi_mode_controller.cpp b/wifi/1.4/default/tests/mock_wifi_mode_controller.cpp
similarity index 96%
rename from wifi/1.3/default/tests/mock_wifi_mode_controller.cpp
rename to wifi/1.4/default/tests/mock_wifi_mode_controller.cpp
index 2b0ea36..ee09029 100644
--- a/wifi/1.3/default/tests/mock_wifi_mode_controller.cpp
+++ b/wifi/1.4/default/tests/mock_wifi_mode_controller.cpp
@@ -24,14 +24,14 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace mode_controller {
MockWifiModeController::MockWifiModeController() : WifiModeController() {}
} // namespace mode_controller
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/mock_wifi_mode_controller.h b/wifi/1.4/default/tests/mock_wifi_mode_controller.h
similarity index 97%
rename from wifi/1.3/default/tests/mock_wifi_mode_controller.h
rename to wifi/1.4/default/tests/mock_wifi_mode_controller.h
index c204059..1e1ce69 100644
--- a/wifi/1.3/default/tests/mock_wifi_mode_controller.h
+++ b/wifi/1.4/default/tests/mock_wifi_mode_controller.h
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace mode_controller {
@@ -38,7 +38,7 @@
};
} // namespace mode_controller
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/ringbuffer_unit_tests.cpp b/wifi/1.4/default/tests/ringbuffer_unit_tests.cpp
similarity index 98%
rename from wifi/1.3/default/tests/ringbuffer_unit_tests.cpp
rename to wifi/1.4/default/tests/ringbuffer_unit_tests.cpp
index 0cf1e4f..a65347f 100644
--- a/wifi/1.3/default/tests/ringbuffer_unit_tests.cpp
+++ b/wifi/1.4/default/tests/ringbuffer_unit_tests.cpp
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
class RingbufferTest : public Test {
@@ -91,7 +91,7 @@
EXPECT_EQ(input, buffer_.getData().front());
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/runtests.sh b/wifi/1.4/default/tests/runtests.sh
similarity index 100%
rename from wifi/1.3/default/tests/runtests.sh
rename to wifi/1.4/default/tests/runtests.sh
diff --git a/wifi/1.3/default/tests/wifi_chip_unit_tests.cpp b/wifi/1.4/default/tests/wifi_chip_unit_tests.cpp
similarity index 99%
rename from wifi/1.3/default/tests/wifi_chip_unit_tests.cpp
rename to wifi/1.4/default/tests/wifi_chip_unit_tests.cpp
index d8ce278..b0357ba 100644
--- a/wifi/1.3/default/tests/wifi_chip_unit_tests.cpp
+++ b/wifi/1.4/default/tests/wifi_chip_unit_tests.cpp
@@ -41,7 +41,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
class WifiChipTest : public Test {
@@ -173,8 +173,9 @@
std::string createIface(const IfaceType& type) {
std::string iface_name;
if (type == IfaceType::AP) {
- chip_->createApIface([&iface_name](const WifiStatus& status,
- const sp<IWifiApIface>& iface) {
+ chip_->createApIface([&iface_name](
+ const WifiStatus& status,
+ const sp<V1_0::IWifiApIface>& iface) {
if (WifiStatusCode::SUCCESS == status.code) {
ASSERT_NE(iface.get(), nullptr);
iface->getName([&iface_name](const WifiStatus& status,
@@ -865,7 +866,7 @@
ASSERT_EQ(createIface(IfaceType::STA), "wlan3");
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/wifi_iface_util_unit_tests.cpp b/wifi/1.4/default/tests/wifi_iface_util_unit_tests.cpp
similarity index 98%
rename from wifi/1.3/default/tests/wifi_iface_util_unit_tests.cpp
rename to wifi/1.4/default/tests/wifi_iface_util_unit_tests.cpp
index 28d23ff..03394bc 100644
--- a/wifi/1.3/default/tests/wifi_iface_util_unit_tests.cpp
+++ b/wifi/1.4/default/tests/wifi_iface_util_unit_tests.cpp
@@ -41,7 +41,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace iface_util {
class WifiIfaceUtilTest : public Test {
@@ -90,7 +90,7 @@
}
} // namespace iface_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/tests/wifi_nan_iface_unit_tests.cpp b/wifi/1.4/default/tests/wifi_nan_iface_unit_tests.cpp
similarity index 99%
rename from wifi/1.3/default/tests/wifi_nan_iface_unit_tests.cpp
rename to wifi/1.4/default/tests/wifi_nan_iface_unit_tests.cpp
index eb6c610..8aefa92 100644
--- a/wifi/1.3/default/tests/wifi_nan_iface_unit_tests.cpp
+++ b/wifi/1.4/default/tests/wifi_nan_iface_unit_tests.cpp
@@ -38,7 +38,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
bool CaptureIfaceEventHandlers(
@@ -142,7 +142,7 @@
captured_iface_event_handlers.on_state_toggle_off_on(kIfaceName);
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi.cpp b/wifi/1.4/default/wifi.cpp
similarity index 99%
rename from wifi/1.3/default/wifi.cpp
rename to wifi/1.4/default/wifi.cpp
index 2f21819..4f48d7e 100644
--- a/wifi/1.3/default/wifi.cpp
+++ b/wifi/1.4/default/wifi.cpp
@@ -28,7 +28,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using hidl_return_util::validateAndCall;
using hidl_return_util::validateAndCallWithLock;
@@ -210,7 +210,7 @@
return createWifiStatus(WifiStatusCode::SUCCESS);
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi.h b/wifi/1.4/default/wifi.h
similarity index 96%
rename from wifi/1.3/default/wifi.h
rename to wifi/1.4/default/wifi.h
index 1c2a154..087d6f7 100644
--- a/wifi/1.3/default/wifi.h
+++ b/wifi/1.4/default/wifi.h
@@ -20,7 +20,7 @@
#include <functional>
#include <android-base/macros.h>
-#include <android/hardware/wifi/1.3/IWifi.h>
+#include <android/hardware/wifi/1.4/IWifi.h>
#include <utils/Looper.h>
#include "hidl_callback_util.h"
@@ -32,13 +32,13 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
/**
* Root HIDL interface object used to control the Wifi HAL.
*/
-class Wifi : public V1_3::IWifi {
+class Wifi : public V1_4::IWifi {
public:
Wifi(const std::shared_ptr<wifi_system::InterfaceTool> iface_tool,
const std::shared_ptr<legacy_hal::WifiLegacyHal> legacy_hal,
@@ -92,7 +92,7 @@
};
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_ap_iface.cpp b/wifi/1.4/default/wifi_ap_iface.cpp
similarity index 71%
rename from wifi/1.3/default/wifi_ap_iface.cpp
rename to wifi/1.4/default/wifi_ap_iface.cpp
index 9a8681a..e677f19 100644
--- a/wifi/1.3/default/wifi_ap_iface.cpp
+++ b/wifi/1.4/default/wifi_ap_iface.cpp
@@ -24,33 +24,18 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using hidl_return_util::validateAndCall;
WifiApIface::WifiApIface(
const std::string& ifname,
const std::weak_ptr<legacy_hal::WifiLegacyHal> legacy_hal,
- const std::weak_ptr<iface_util::WifiIfaceUtil> iface_util,
- const std::weak_ptr<feature_flags::WifiFeatureFlags> feature_flags)
+ const std::weak_ptr<iface_util::WifiIfaceUtil> iface_util)
: ifname_(ifname),
legacy_hal_(legacy_hal),
iface_util_(iface_util),
- feature_flags_(feature_flags),
- is_valid_(true) {
- if (feature_flags_.lock()->isApMacRandomizationDisabled()) {
- LOG(INFO) << "AP MAC randomization disabled";
- return;
- }
- LOG(INFO) << "AP MAC randomization enabled";
- // Set random MAC address
- std::array<uint8_t, 6> randomized_mac =
- iface_util_.lock()->getOrCreateRandomMacAddress();
- bool status = iface_util_.lock()->setMacAddress(ifname_, randomized_mac);
- if (!status) {
- LOG(ERROR) << "Failed to set random mac address";
- }
-}
+ is_valid_(true) {}
void WifiApIface::invalidate() {
legacy_hal_.reset();
@@ -85,6 +70,20 @@
hidl_status_cb, band);
}
+Return<void> WifiApIface::setMacAddress(const hidl_array<uint8_t, 6>& mac,
+ setMacAddress_cb hidl_status_cb) {
+ return validateAndCall(this, WifiStatusCode::ERROR_WIFI_IFACE_INVALID,
+ &WifiApIface::setMacAddressInternal, hidl_status_cb,
+ mac);
+}
+
+Return<void> WifiApIface::getFactoryMacAddress(
+ getFactoryMacAddress_cb hidl_status_cb) {
+ return validateAndCall(this, WifiStatusCode::ERROR_WIFI_IFACE_INVALID,
+ &WifiApIface::getFactoryMacAddressInternal,
+ hidl_status_cb);
+}
+
std::pair<WifiStatus, std::string> WifiApIface::getNameInternal() {
return {createWifiStatus(WifiStatusCode::SUCCESS), ifname_};
}
@@ -111,8 +110,28 @@
ifname_, hidl_struct_util::convertHidlWifiBandToLegacy(band));
return {createWifiStatusFromLegacyError(legacy_status), valid_frequencies};
}
+
+WifiStatus WifiApIface::setMacAddressInternal(
+ const std::array<uint8_t, 6>& mac) {
+ bool status = iface_util_.lock()->setMacAddress(ifname_, mac);
+ if (!status) {
+ return createWifiStatus(WifiStatusCode::ERROR_UNKNOWN);
+ }
+ return createWifiStatus(WifiStatusCode::SUCCESS);
+}
+
+std::pair<WifiStatus, std::array<uint8_t, 6>>
+WifiApIface::getFactoryMacAddressInternal() {
+ std::array<uint8_t, 6> mac =
+ iface_util_.lock()->getFactoryMacAddress(ifname_);
+ if (mac[0] == 0 && mac[1] == 0 && mac[2] == 0 && mac[3] == 0 &&
+ mac[4] == 0 && mac[5] == 0) {
+ return {createWifiStatus(WifiStatusCode::ERROR_UNKNOWN), mac};
+ }
+ return {createWifiStatus(WifiStatusCode::SUCCESS), mac};
+}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_ap_iface.h b/wifi/1.4/default/wifi_ap_iface.h
similarity index 75%
rename from wifi/1.3/default/wifi_ap_iface.h
rename to wifi/1.4/default/wifi_ap_iface.h
index 98c5c9c..4f3438c 100644
--- a/wifi/1.3/default/wifi_ap_iface.h
+++ b/wifi/1.4/default/wifi_ap_iface.h
@@ -18,29 +18,26 @@
#define WIFI_AP_IFACE_H_
#include <android-base/macros.h>
-#include <android/hardware/wifi/1.0/IWifiApIface.h>
+#include <android/hardware/wifi/1.4/IWifiApIface.h>
-#include "wifi_feature_flags.h"
#include "wifi_iface_util.h"
#include "wifi_legacy_hal.h"
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using namespace android::hardware::wifi::V1_0;
/**
* HIDL interface object used to control an AP Iface instance.
*/
-class WifiApIface : public V1_0::IWifiApIface {
+class WifiApIface : public V1_4::IWifiApIface {
public:
- WifiApIface(
- const std::string& ifname,
- const std::weak_ptr<legacy_hal::WifiLegacyHal> legacy_hal,
- const std::weak_ptr<iface_util::WifiIfaceUtil> iface_util,
- const std::weak_ptr<feature_flags::WifiFeatureFlags> feature_flags);
+ WifiApIface(const std::string& ifname,
+ const std::weak_ptr<legacy_hal::WifiLegacyHal> legacy_hal,
+ const std::weak_ptr<iface_util::WifiIfaceUtil> iface_util);
// Refer to |WifiChip::invalidate()|.
void invalidate();
bool isValid();
@@ -53,6 +50,10 @@
setCountryCode_cb hidl_status_cb) override;
Return<void> getValidFrequenciesForBand(
WifiBand band, getValidFrequenciesForBand_cb hidl_status_cb) override;
+ Return<void> setMacAddress(const hidl_array<uint8_t, 6>& mac,
+ setMacAddress_cb hidl_status_cb) override;
+ Return<void> getFactoryMacAddress(
+ getFactoryMacAddress_cb hidl_status_cb) override;
private:
// Corresponding worker functions for the HIDL methods.
@@ -61,18 +62,20 @@
WifiStatus setCountryCodeInternal(const std::array<int8_t, 2>& code);
std::pair<WifiStatus, std::vector<WifiChannelInMhz>>
getValidFrequenciesForBandInternal(WifiBand band);
+ WifiStatus setMacAddressInternal(const std::array<uint8_t, 6>& mac);
+ std::pair<WifiStatus, std::array<uint8_t, 6>>
+ getFactoryMacAddressInternal();
std::string ifname_;
std::weak_ptr<legacy_hal::WifiLegacyHal> legacy_hal_;
std::weak_ptr<iface_util::WifiIfaceUtil> iface_util_;
- std::weak_ptr<feature_flags::WifiFeatureFlags> feature_flags_;
bool is_valid_;
DISALLOW_COPY_AND_ASSIGN(WifiApIface);
};
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
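
For context, here is a minimal client-side sketch of the two IWifiApIface@1.4 methods introduced above. It is not part of this change; the helper name and the assumption that the caller already holds a live @1.4 AP iface proxy obtained from IWifiChip are illustrative only.

    #include <android/hardware/wifi/1.4/IWifiApIface.h>

    using ::android::sp;
    using ::android::hardware::hidl_array;
    using ::android::hardware::wifi::V1_0::WifiStatus;
    using ::android::hardware::wifi::V1_0::WifiStatusCode;
    using ::android::hardware::wifi::V1_4::IWifiApIface;

    // Sets a caller-chosen MAC on the AP iface, then reads back the factory MAC.
    // |iface| is assumed to be an already-created @1.4 AP iface proxy.
    bool exerciseApMacMethods(const sp<IWifiApIface>& iface) {
        bool ok = false;
        const hidl_array<uint8_t, 6> new_mac{{0x02, 0x12, 0x34, 0x56, 0x78, 0x9a}};
        // HIDL completion callbacks run synchronously before the call returns.
        auto ret = iface->setMacAddress(new_mac, [&ok](const WifiStatus& status) {
            ok = (status.code == WifiStatusCode::SUCCESS);
        });
        if (!ret.isOk() || !ok) return false;
        iface->getFactoryMacAddress(
            [&ok](const WifiStatus& status, const hidl_array<uint8_t, 6>& factory_mac) {
                // The HAL reports ERROR_UNKNOWN when the driver returns an
                // all-zero address (see getFactoryMacAddressInternal above).
                ok = (status.code == WifiStatusCode::SUCCESS);
                (void)factory_mac;  // A real caller would persist or log this.
            });
        return ok;
    }
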
diff --git a/wifi/1.3/default/wifi_chip.cpp b/wifi/1.4/default/wifi_chip.cpp
similarity index 99%
rename from wifi/1.3/default/wifi_chip.cpp
rename to wifi/1.4/default/wifi_chip.cpp
index e9991dc..408096f 100644
--- a/wifi/1.3/default/wifi_chip.cpp
+++ b/wifi/1.4/default/wifi_chip.cpp
@@ -307,7 +307,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using hidl_return_util::validateAndCall;
using hidl_return_util::validateAndCallWithLock;
@@ -321,7 +321,6 @@
legacy_hal_(legacy_hal),
mode_controller_(mode_controller),
iface_util_(iface_util),
- feature_flags_(feature_flags),
is_valid_(true),
current_mode_id_(feature_flags::chip_mode_ids::kInvalid),
modes_(feature_flags.lock()->getChipModes()),
@@ -806,8 +805,7 @@
return {createWifiStatus(WifiStatusCode::ERROR_NOT_AVAILABLE), {}};
}
std::string ifname = allocateApIfaceName();
- sp<WifiApIface> iface =
- new WifiApIface(ifname, legacy_hal_, iface_util_, feature_flags_);
+ sp<WifiApIface> iface = new WifiApIface(ifname, legacy_hal_, iface_util_);
ap_ifaces_.push_back(iface);
for (const auto& callback : event_cb_handler_.getCallbacks()) {
if (!callback->onIfaceAdded(IfaceType::AP, ifname).isOk()) {
@@ -1539,7 +1537,7 @@
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_chip.h b/wifi/1.4/default/wifi_chip.h
similarity index 98%
rename from wifi/1.3/default/wifi_chip.h
rename to wifi/1.4/default/wifi_chip.h
index 153ca6a..0d7061c 100644
--- a/wifi/1.3/default/wifi_chip.h
+++ b/wifi/1.4/default/wifi_chip.h
@@ -37,7 +37,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using namespace android::hardware::wifi::V1_0;
@@ -251,7 +251,6 @@
std::weak_ptr<legacy_hal::WifiLegacyHal> legacy_hal_;
std::weak_ptr<mode_controller::WifiModeController> mode_controller_;
std::weak_ptr<iface_util::WifiIfaceUtil> iface_util_;
- std::weak_ptr<feature_flags::WifiFeatureFlags> feature_flags_;
std::vector<sp<WifiApIface>> ap_ifaces_;
std::vector<sp<WifiNanIface>> nan_ifaces_;
std::vector<sp<WifiP2pIface>> p2p_ifaces_;
@@ -273,7 +272,7 @@
};
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_feature_flags.cpp b/wifi/1.4/default/wifi_feature_flags.cpp
similarity index 92%
rename from wifi/1.3/default/wifi_feature_flags.cpp
rename to wifi/1.4/default/wifi_feature_flags.cpp
index 7212cfa..195b460 100644
--- a/wifi/1.3/default/wifi_feature_flags.cpp
+++ b/wifi/1.4/default/wifi_feature_flags.cpp
@@ -19,7 +19,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace feature_flags {
@@ -145,10 +145,12 @@
#undef NAN
#ifdef WIFI_HIDL_FEATURE_DISABLE_AP_MAC_RANDOMIZATION
-static const bool wifiHidlFeatureDisableApMacRandomization = true;
-#else
-static const bool wifiHidlFeatureDisableApMacRandomization = false;
-#endif // WIFI_HIDL_FEATURE_DISABLE_AP
+#pragma message \
+ "WIFI_HIDL_FEATURE_DISABLE_AP_MAC_RANDOMIZATION is deprecated; override " \
+ "'config_wifi_ap_randomization_supported' in " \
+ "frameworks/base/core/res/res/values/config.xml in the device overlay " \
+ "instead"
+#endif // WIFI_HIDL_FEATURE_DISABLE_AP_MAC_RANDOMIZATION
WifiFeatureFlags::WifiFeatureFlags() {}
@@ -156,13 +158,9 @@
return kChipModes;
}
-bool WifiFeatureFlags::isApMacRandomizationDisabled() {
- return wifiHidlFeatureDisableApMacRandomization;
-}
-
} // namespace feature_flags
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_feature_flags.h b/wifi/1.4/default/wifi_feature_flags.h
similarity index 94%
rename from wifi/1.3/default/wifi_feature_flags.h
rename to wifi/1.4/default/wifi_feature_flags.h
index 3ae6920..292dedf 100644
--- a/wifi/1.3/default/wifi_feature_flags.h
+++ b/wifi/1.4/default/wifi_feature_flags.h
@@ -22,7 +22,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace feature_flags {
@@ -43,12 +43,11 @@
virtual ~WifiFeatureFlags() = default;
virtual std::vector<V1_0::IWifiChip::ChipMode> getChipModes();
- virtual bool isApMacRandomizationDisabled();
};
} // namespace feature_flags
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_iface_util.cpp b/wifi/1.4/default/wifi_iface_util.cpp
similarity index 98%
rename from wifi/1.3/default/wifi_iface_util.cpp
rename to wifi/1.4/default/wifi_iface_util.cpp
index 34bc02d..2883b46 100644
--- a/wifi/1.3/default/wifi_iface_util.cpp
+++ b/wifi/1.4/default/wifi_iface_util.cpp
@@ -35,7 +35,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace iface_util {
@@ -112,7 +112,7 @@
}
} // namespace iface_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_iface_util.h b/wifi/1.4/default/wifi_iface_util.h
similarity index 98%
rename from wifi/1.3/default/wifi_iface_util.h
rename to wifi/1.4/default/wifi_iface_util.h
index 98073e0..35edff6 100644
--- a/wifi/1.3/default/wifi_iface_util.h
+++ b/wifi/1.4/default/wifi_iface_util.h
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace iface_util {
@@ -67,7 +67,7 @@
} // namespace iface_util
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_legacy_hal.cpp b/wifi/1.4/default/wifi_legacy_hal.cpp
similarity index 99%
rename from wifi/1.3/default/wifi_legacy_hal.cpp
rename to wifi/1.4/default/wifi_legacy_hal.cpp
index 485bd16..8139253 100644
--- a/wifi/1.3/default/wifi_legacy_hal.cpp
+++ b/wifi/1.4/default/wifi_legacy_hal.cpp
@@ -50,7 +50,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace legacy_hal {
// Legacy HAL functions accept "C" style function pointers, so use global
@@ -1449,7 +1449,7 @@
} // namespace legacy_hal
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_legacy_hal.h b/wifi/1.4/default/wifi_legacy_hal.h
similarity index 99%
rename from wifi/1.3/default/wifi_legacy_hal.h
rename to wifi/1.4/default/wifi_legacy_hal.h
index 9cfa172..7f16c30 100644
--- a/wifi/1.3/default/wifi_legacy_hal.h
+++ b/wifi/1.4/default/wifi_legacy_hal.h
@@ -35,7 +35,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
// This is in a separate namespace to prevent typename conflicts between
// the legacy HAL types and the HIDL interface types.
@@ -396,7 +396,7 @@
} // namespace legacy_hal
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_legacy_hal_stubs.cpp b/wifi/1.4/default/wifi_legacy_hal_stubs.cpp
similarity index 99%
rename from wifi/1.3/default/wifi_legacy_hal_stubs.cpp
rename to wifi/1.4/default/wifi_legacy_hal_stubs.cpp
index dedd2d4..27afa1f 100644
--- a/wifi/1.3/default/wifi_legacy_hal_stubs.cpp
+++ b/wifi/1.4/default/wifi_legacy_hal_stubs.cpp
@@ -20,7 +20,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace legacy_hal {
template <typename>
@@ -142,7 +142,7 @@
}
} // namespace legacy_hal
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_legacy_hal_stubs.h b/wifi/1.4/default/wifi_legacy_hal_stubs.h
similarity index 96%
rename from wifi/1.3/default/wifi_legacy_hal_stubs.h
rename to wifi/1.4/default/wifi_legacy_hal_stubs.h
index 64854e0..577a545 100644
--- a/wifi/1.3/default/wifi_legacy_hal_stubs.h
+++ b/wifi/1.4/default/wifi_legacy_hal_stubs.h
@@ -20,7 +20,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace legacy_hal {
#include <hardware_legacy/wifi_hal.h>
@@ -28,7 +28,7 @@
bool initHalFuncTableWithStubs(wifi_hal_fn* hal_fn);
} // namespace legacy_hal
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_mode_controller.cpp b/wifi/1.4/default/wifi_mode_controller.cpp
similarity index 98%
rename from wifi/1.3/default/wifi_mode_controller.cpp
rename to wifi/1.4/default/wifi_mode_controller.cpp
index c392486..252121a 100644
--- a/wifi/1.3/default/wifi_mode_controller.cpp
+++ b/wifi/1.4/default/wifi_mode_controller.cpp
@@ -48,7 +48,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace mode_controller {
@@ -85,7 +85,7 @@
}
} // namespace mode_controller
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_mode_controller.h b/wifi/1.4/default/wifi_mode_controller.h
similarity index 97%
rename from wifi/1.3/default/wifi_mode_controller.h
rename to wifi/1.4/default/wifi_mode_controller.h
index ace5a52..45fa999 100644
--- a/wifi/1.3/default/wifi_mode_controller.h
+++ b/wifi/1.4/default/wifi_mode_controller.h
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
namespace mode_controller {
using namespace android::hardware::wifi::V1_0;
@@ -55,7 +55,7 @@
} // namespace mode_controller
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_nan_iface.cpp b/wifi/1.4/default/wifi_nan_iface.cpp
similarity index 99%
rename from wifi/1.3/default/wifi_nan_iface.cpp
rename to wifi/1.4/default/wifi_nan_iface.cpp
index ff9f422..981acb2 100644
--- a/wifi/1.3/default/wifi_nan_iface.cpp
+++ b/wifi/1.4/default/wifi_nan_iface.cpp
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using hidl_return_util::validateAndCall;
@@ -881,7 +881,7 @@
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_nan_iface.h b/wifi/1.4/default/wifi_nan_iface.h
similarity index 99%
rename from wifi/1.3/default/wifi_nan_iface.h
rename to wifi/1.4/default/wifi_nan_iface.h
index 737be93..e3a5c34 100644
--- a/wifi/1.3/default/wifi_nan_iface.h
+++ b/wifi/1.4/default/wifi_nan_iface.h
@@ -28,7 +28,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using namespace android::hardware::wifi::V1_0;
@@ -160,7 +160,7 @@
};
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_p2p_iface.cpp b/wifi/1.4/default/wifi_p2p_iface.cpp
similarity index 97%
rename from wifi/1.3/default/wifi_p2p_iface.cpp
rename to wifi/1.4/default/wifi_p2p_iface.cpp
index b5d5886..9e7341f 100644
--- a/wifi/1.3/default/wifi_p2p_iface.cpp
+++ b/wifi/1.4/default/wifi_p2p_iface.cpp
@@ -23,7 +23,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using hidl_return_util::validateAndCall;
@@ -60,7 +60,7 @@
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_p2p_iface.h b/wifi/1.4/default/wifi_p2p_iface.h
similarity index 97%
rename from wifi/1.3/default/wifi_p2p_iface.h
rename to wifi/1.4/default/wifi_p2p_iface.h
index 8a7207a..a6fc59d 100644
--- a/wifi/1.3/default/wifi_p2p_iface.h
+++ b/wifi/1.4/default/wifi_p2p_iface.h
@@ -25,7 +25,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using namespace android::hardware::wifi::V1_0;
@@ -58,7 +58,7 @@
};
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_rtt_controller.cpp b/wifi/1.4/default/wifi_rtt_controller.cpp
similarity index 99%
rename from wifi/1.3/default/wifi_rtt_controller.cpp
rename to wifi/1.4/default/wifi_rtt_controller.cpp
index 3dcbee6..d50c7cf 100644
--- a/wifi/1.3/default/wifi_rtt_controller.cpp
+++ b/wifi/1.4/default/wifi_rtt_controller.cpp
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using hidl_return_util::validateAndCall;
@@ -271,7 +271,7 @@
return createWifiStatusFromLegacyError(legacy_status);
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_rtt_controller.h b/wifi/1.4/default/wifi_rtt_controller.h
similarity index 98%
rename from wifi/1.3/default/wifi_rtt_controller.h
rename to wifi/1.4/default/wifi_rtt_controller.h
index eedd22a..1415ff7 100644
--- a/wifi/1.3/default/wifi_rtt_controller.h
+++ b/wifi/1.4/default/wifi_rtt_controller.h
@@ -27,7 +27,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
/**
@@ -98,7 +98,7 @@
};
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_sta_iface.cpp b/wifi/1.4/default/wifi_sta_iface.cpp
similarity index 99%
rename from wifi/1.3/default/wifi_sta_iface.cpp
rename to wifi/1.4/default/wifi_sta_iface.cpp
index a6539e5..3e0127e 100644
--- a/wifi/1.3/default/wifi_sta_iface.cpp
+++ b/wifi/1.4/default/wifi_sta_iface.cpp
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using hidl_return_util::validateAndCall;
@@ -641,7 +641,7 @@
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_sta_iface.h b/wifi/1.4/default/wifi_sta_iface.h
similarity index 99%
rename from wifi/1.3/default/wifi_sta_iface.h
rename to wifi/1.4/default/wifi_sta_iface.h
index 9224939..d8f7a01 100644
--- a/wifi/1.3/default/wifi_sta_iface.h
+++ b/wifi/1.4/default/wifi_sta_iface.h
@@ -28,7 +28,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using namespace android::hardware::wifi::V1_0;
@@ -171,7 +171,7 @@
};
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_status_util.cpp b/wifi/1.4/default/wifi_status_util.cpp
similarity index 98%
rename from wifi/1.3/default/wifi_status_util.cpp
rename to wifi/1.4/default/wifi_status_util.cpp
index 0a5bb13..597c04b 100644
--- a/wifi/1.3/default/wifi_status_util.cpp
+++ b/wifi/1.4/default/wifi_status_util.cpp
@@ -19,7 +19,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
std::string legacyErrorToString(legacy_hal::wifi_error error) {
@@ -100,7 +100,7 @@
}
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/wifi_status_util.h b/wifi/1.4/default/wifi_status_util.h
similarity index 97%
rename from wifi/1.3/default/wifi_status_util.h
rename to wifi/1.4/default/wifi_status_util.h
index bc8baa9..41bcfaf 100644
--- a/wifi/1.3/default/wifi_status_util.h
+++ b/wifi/1.4/default/wifi_status_util.h
@@ -24,7 +24,7 @@
namespace android {
namespace hardware {
namespace wifi {
-namespace V1_3 {
+namespace V1_4 {
namespace implementation {
using namespace android::hardware::wifi::V1_0;
@@ -37,7 +37,7 @@
WifiStatus createWifiStatusFromLegacyError(legacy_hal::wifi_error error);
} // namespace implementation
-} // namespace V1_3
+} // namespace V1_4
} // namespace wifi
} // namespace hardware
} // namespace android
diff --git a/wifi/1.3/default/OWNERS b/wifi/1.4/vts/OWNERS
similarity index 100%
copy from wifi/1.3/default/OWNERS
copy to wifi/1.4/vts/OWNERS
diff --git a/wifi/1.4/vts/functional/Android.bp b/wifi/1.4/vts/functional/Android.bp
new file mode 100644
index 0000000..42c60f2
--- /dev/null
+++ b/wifi/1.4/vts/functional/Android.bp
@@ -0,0 +1,33 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// SoftAP-specific tests, similar to VtsHalWifiApV1_0TargetTest.
+cc_test {
+ name: "VtsHalWifiApV1_4TargetTest",
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: [
+ "VtsHalWifiV1_4TargetTest.cpp",
+ "wifi_ap_iface_hidl_test.cpp",
+ ],
+ static_libs: [
+ "VtsHalWifiV1_0TargetTestUtil",
+ "android.hardware.wifi@1.0",
+ "android.hardware.wifi@1.1",
+ "android.hardware.wifi@1.2",
+ "android.hardware.wifi@1.3",
+ "android.hardware.wifi@1.4",
+ ],
+}
diff --git a/wifi/1.4/vts/functional/VtsHalWifiV1_4TargetTest.cpp b/wifi/1.4/vts/functional/VtsHalWifiV1_4TargetTest.cpp
new file mode 100644
index 0000000..deac0fa
--- /dev/null
+++ b/wifi/1.4/vts/functional/VtsHalWifiV1_4TargetTest.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+#include <android/hardware/wifi/1.4/IWifi.h>
+
+#include "wifi_hidl_test_utils.h"
+
+using ::android::hardware::wifi::V1_4::IWifi;
+
+// Test environment for Wifi HIDL HAL.
+class WifiHidlEnvironment_1_4 : public WifiHidlEnvironment {
+ public:
+ // get the test environment singleton
+ static WifiHidlEnvironment_1_4* Instance() {
+ static WifiHidlEnvironment_1_4* instance = new WifiHidlEnvironment_1_4;
+ return instance;
+ }
+
+ virtual void registerTestServices() override {
+ registerTestService<android::hardware::wifi::V1_4::IWifi>();
+ }
+
+ private:
+ WifiHidlEnvironment_1_4() {}
+};
+
+WifiHidlEnvironment_1_4* gEnv = WifiHidlEnvironment_1_4::Instance();
+
+int main(int argc, char** argv) {
+ ::testing::AddGlobalTestEnvironment(gEnv);
+ ::testing::InitGoogleTest(&argc, argv);
+ gEnv->init(&argc, argv);
+ int status = RUN_ALL_TESTS();
+ LOG(INFO) << "Test result = " << status;
+ return status;
+}
diff --git a/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp b/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp
new file mode 100644
index 0000000..68e9bbb
--- /dev/null
+++ b/wifi/1.4/vts/functional/wifi_ap_iface_hidl_test.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android/hardware/wifi/1.4/IWifiApIface.h>
+
+#include <VtsHalHidlTargetTestBase.h>
+
+#include "wifi_hidl_call_util.h"
+#include "wifi_hidl_test_utils.h"
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::wifi::V1_0::WifiStatus;
+using ::android::hardware::wifi::V1_0::WifiStatusCode;
+using ::android::hardware::wifi::V1_4::IWifiApIface;
+
+extern WifiHidlEnvironment* gEnv;
+
+/**
+ * Fixture to use for all AP Iface HIDL interface tests.
+ */
+class WifiApIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+ public:
+ virtual void SetUp() override {
+ wifi_ap_iface_ = IWifiApIface::castFrom(getWifiApIface());
+ ASSERT_NE(nullptr, wifi_ap_iface_.get());
+ }
+
+ virtual void TearDown() override {
+ stopWifi();
+ }
+
+ protected:
+ sp<IWifiApIface> wifi_ap_iface_;
+};
+
+/*
+ * SetMacAddress:
+ * Ensures that calls to set MAC address will return a success status
+ * code.
+ */
+TEST_F(WifiApIfaceHidlTest, SetMacAddress) {
+ const hidl_array<uint8_t, 6> kMac{{0x12, 0x22, 0x33, 0x52, 0x10, 0x41}};
+ EXPECT_EQ(WifiStatusCode::SUCCESS,
+ HIDL_INVOKE(wifi_ap_iface_, setMacAddress, kMac).code);
+}
+
+/*
+ * GetFactoryMacAddress:
+ * Ensures that calls to get factory MAC address will retrieve a non-zero MAC
+ * and return a success status code.
+ */
+TEST_F(WifiApIfaceHidlTest, GetFactoryMacAddress) {
+ std::pair<WifiStatus, hidl_array<uint8_t, 6> > status_and_mac =
+ HIDL_INVOKE(wifi_ap_iface_, getFactoryMacAddress);
+ EXPECT_EQ(WifiStatusCode::SUCCESS, status_and_mac.first.code);
+ hidl_array<uint8_t, 6> all_zero{};
+ EXPECT_NE(all_zero, status_and_mac.second);
+}
diff --git a/wifi/supplicant/1.3/Android.bp b/wifi/supplicant/1.3/Android.bp
index 6633d9d..3f20531 100644
--- a/wifi/supplicant/1.3/Android.bp
+++ b/wifi/supplicant/1.3/Android.bp
@@ -9,6 +9,8 @@
srcs: [
"types.hal",
"ISupplicant.hal",
+ "ISupplicantStaIface.hal",
+ "ISupplicantStaIfaceCallback.hal",
"ISupplicantStaNetwork.hal",
],
interfaces: [
diff --git a/wifi/supplicant/1.3/ISupplicantStaIface.hal b/wifi/supplicant/1.3/ISupplicantStaIface.hal
new file mode 100644
index 0000000..62b4033
--- /dev/null
+++ b/wifi/supplicant/1.3/ISupplicantStaIface.hal
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.wifi.supplicant@1.3;
+
+import @1.0::SupplicantStatus;
+import @1.2::ISupplicantStaIface;
+import @1.3::ISupplicantStaIfaceCallback;
+
+/**
+ * Interface exposed by the supplicant for each station mode network
+ * interface (e.g. wlan0) it controls.
+ */
+interface ISupplicantStaIface extends @1.2::ISupplicantStaIface {
+ /**
+ * Register for callbacks from this interface.
+ *
+ * These callbacks are invoked for events that are specific to this interface.
+ * Registration of multiple callback objects is supported. These objects must
+ * be automatically deleted when the corresponding client process is dead or
+ * if this interface is removed.
+ *
+ * @param callback An instance of the |ISupplicantStaIfaceCallback| HIDL
+ * interface object.
+ * @return status Status of the operation.
+ * Possible status codes:
+ * |SupplicantStatusCode.SUCCESS|,
+ * |SupplicantStatusCode.FAILURE_UNKNOWN|,
+ * |SupplicantStatusCode.FAILURE_IFACE_INVALID|
+ */
+ registerCallback_1_3(ISupplicantStaIfaceCallback callback)
+ generates (SupplicantStatus status);
+};
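
A minimal registration sketch for the new registerCallback_1_3 method follows. It is not part of this change; the helper name is an assumption, and |callback| must be a concrete implementation that stubs every inherited @1.0-@1.2 callback, along the lines of the IfaceCallback class in the VTS test added further down.

    #include <android/hardware/wifi/supplicant/1.0/ISupplicantStaIface.h>
    #include <android/hardware/wifi/supplicant/1.3/ISupplicantStaIface.h>
    #include <android/hardware/wifi/supplicant/1.3/ISupplicantStaIfaceCallback.h>

    using ::android::sp;
    using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatus;
    using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatusCode;

    namespace supplicant_v1_0 = ::android::hardware::wifi::supplicant::V1_0;
    namespace supplicant_v1_3 = ::android::hardware::wifi::supplicant::V1_3;

    // Upgrades an existing @1.0 STA iface proxy to @1.3 and registers a callback.
    // Returns false when the running supplicant does not implement @1.3.
    bool registerCallback13(
            const sp<supplicant_v1_0::ISupplicantStaIface>& iface_1_0,
            const sp<supplicant_v1_3::ISupplicantStaIfaceCallback>& callback) {
        sp<supplicant_v1_3::ISupplicantStaIface> iface_1_3 =
            supplicant_v1_3::ISupplicantStaIface::castFrom(iface_1_0);
        if (iface_1_3 == nullptr) return false;
        bool ok = false;
        iface_1_3->registerCallback_1_3(
            callback, [&ok](const SupplicantStatus& status) {
                ok = (status.code == SupplicantStatusCode::SUCCESS);
            });
        return ok;
    }
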
diff --git a/wifi/supplicant/1.3/ISupplicantStaIfaceCallback.hal b/wifi/supplicant/1.3/ISupplicantStaIfaceCallback.hal
new file mode 100644
index 0000000..107e0fc
--- /dev/null
+++ b/wifi/supplicant/1.3/ISupplicantStaIfaceCallback.hal
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.wifi.supplicant@1.3;
+
+import @1.2::ISupplicantStaIfaceCallback;
+
+/**
+ * Callback Interface exposed by the supplicant service
+ * for each station mode interface (ISupplicantStaIface).
+ *
+ * Clients need to host an instance of this HIDL interface object and
+ * pass a reference of the object to the supplicant via the
+ * corresponding |ISupplicantStaIface.registerCallback_1_3| method.
+ */
+interface ISupplicantStaIfaceCallback extends @1.2::ISupplicantStaIfaceCallback {
+ /**
+ * Indicates that a PMK cache entry has been added.
+ *
+ * @param expirationTimeInSec expiration time in seconds
+ * @param serializedEntry Serialized PMK cache entry; the content is
+ * opaque to the framework and depends on the native implementation.
+ */
+ oneway onPmkCacheAdded(int64_t expirationTimeInSec, vec<uint8_t> serializedEntry);
+};
diff --git a/wifi/supplicant/1.3/ISupplicantStaNetwork.hal b/wifi/supplicant/1.3/ISupplicantStaNetwork.hal
index eb9de9a..5e265c6 100644
--- a/wifi/supplicant/1.3/ISupplicantStaNetwork.hal
+++ b/wifi/supplicant/1.3/ISupplicantStaNetwork.hal
@@ -47,4 +47,19 @@
* @return ocspType ocsp type.
*/
getOcsp() generates (SupplicantStatus status, OcspType ocspType);
+
+ /**
+ * Add a PMK entry into the supplicant PMK cache.
+ *
+ * @param serializedEntry Serialized PMK cache entry; the content is
+ * opaque to the framework and depends on the native implementation.
+ * @return status Status of the operation
+ * Possible status codes:
+ * |SupplicantStatusCode.SUCCESS|,
+ * |SupplicantStatusCode.FAILURE_ARGS_INVALID|,
+ * |SupplicantStatusCode.FAILURE_UNKNOWN|,
+ * |SupplicantStatusCode.FAILURE_NETWORK_INVALID|
+ */
+ setPmkCache(vec<uint8_t> serializedEntry)
+ generates (SupplicantStatus status);
};
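
And a matching sketch for setPmkCache, again not part of this change; the helper name and the idea of replaying a previously captured blob are assumptions.

    #include <vector>

    #include <android/hardware/wifi/supplicant/1.3/ISupplicantStaNetwork.h>

    using ::android::sp;
    using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatus;
    using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatusCode;
    using ::android::hardware::wifi::supplicant::V1_3::ISupplicantStaNetwork;

    // Feeds a previously captured, opaque PMK cache blob (e.g. one delivered
    // via onPmkCacheAdded) back into the supplicant for |network|.
    bool restorePmkCache(const sp<ISupplicantStaNetwork>& network,
                         const std::vector<uint8_t>& serialized_entry) {
        bool ok = false;
        network->setPmkCache(serialized_entry,
                             [&ok](const SupplicantStatus& status) {
                                 ok = (status.code == SupplicantStatusCode::SUCCESS);
                             });
        return ok;
    }
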
diff --git a/wifi/supplicant/1.3/vts/functional/Android.bp b/wifi/supplicant/1.3/vts/functional/Android.bp
index 67c7348..abb8600 100644
--- a/wifi/supplicant/1.3/vts/functional/Android.bp
+++ b/wifi/supplicant/1.3/vts/functional/Android.bp
@@ -42,6 +42,7 @@
defaults: ["VtsHalTargetTestDefaults"],
srcs: [
"VtsHalWifiSupplicantV1_3TargetTest.cpp",
+ "supplicant_sta_iface_hidl_test.cpp",
"supplicant_sta_network_hidl_test.cpp",
],
static_libs: [
diff --git a/wifi/supplicant/1.3/vts/functional/supplicant_hidl_test_utils_1_3.cpp b/wifi/supplicant/1.3/vts/functional/supplicant_hidl_test_utils_1_3.cpp
index 86959eb..308808d 100644
--- a/wifi/supplicant/1.3/vts/functional/supplicant_hidl_test_utils_1_3.cpp
+++ b/wifi/supplicant/1.3/vts/functional/supplicant_hidl_test_utils_1_3.cpp
@@ -21,8 +21,13 @@
#include "supplicant_hidl_test_utils_1_3.h"
using ::android::sp;
+using ::android::hardware::wifi::supplicant::V1_3::ISupplicantStaIface;
using ::android::hardware::wifi::supplicant::V1_3::ISupplicantStaNetwork;
+sp<ISupplicantStaIface> getSupplicantStaIface_1_3() {
+ return ISupplicantStaIface::castFrom(getSupplicantStaIface());
+}
+
sp<ISupplicantStaNetwork> createSupplicantStaNetwork_1_3() {
return ISupplicantStaNetwork::castFrom(createSupplicantStaNetwork());
}
diff --git a/wifi/supplicant/1.3/vts/functional/supplicant_hidl_test_utils_1_3.h b/wifi/supplicant/1.3/vts/functional/supplicant_hidl_test_utils_1_3.h
index 8e64162..39dbb8f 100644
--- a/wifi/supplicant/1.3/vts/functional/supplicant_hidl_test_utils_1_3.h
+++ b/wifi/supplicant/1.3/vts/functional/supplicant_hidl_test_utils_1_3.h
@@ -17,8 +17,11 @@
#ifndef SUPPLICANT_HIDL_TEST_UTILS_1_3_H
#define SUPPLICANT_HIDL_TEST_UTILS_1_3_H
+#include <android/hardware/wifi/supplicant/1.3/ISupplicantStaIface.h>
#include <android/hardware/wifi/supplicant/1.3/ISupplicantStaNetwork.h>
+android::sp<android::hardware::wifi::supplicant::V1_3::ISupplicantStaIface>
+getSupplicantStaIface_1_3();
android::sp<android::hardware::wifi::supplicant::V1_3::ISupplicantStaNetwork>
createSupplicantStaNetwork_1_3();
diff --git a/wifi/supplicant/1.3/vts/functional/supplicant_sta_iface_hidl_test.cpp b/wifi/supplicant/1.3/vts/functional/supplicant_sta_iface_hidl_test.cpp
new file mode 100644
index 0000000..9b68a47
--- /dev/null
+++ b/wifi/supplicant/1.3/vts/functional/supplicant_sta_iface_hidl_test.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <VtsHalHidlTargetTestBase.h>
+#include <android/hardware/wifi/supplicant/1.2/types.h>
+#include <android/hardware/wifi/supplicant/1.3/ISupplicantStaIface.h>
+#include <android/hardware/wifi/supplicant/1.3/ISupplicantStaIfaceCallback.h>
+#include <android/hardware/wifi/supplicant/1.3/ISupplicantStaNetwork.h>
+#include <android/hardware/wifi/supplicant/1.3/types.h>
+#include <hidl/HidlSupport.h>
+#include <hidl/Status.h>
+
+#include "supplicant_hidl_test_utils.h"
+#include "supplicant_hidl_test_utils_1_3.h"
+
+using ::android::sp;
+using ::android::hardware::hidl_array;
+using ::android::hardware::hidl_string;
+using ::android::hardware::hidl_vec;
+using ::android::hardware::Return;
+using ::android::hardware::Void;
+using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatus;
+using ::android::hardware::wifi::supplicant::V1_0::SupplicantStatusCode;
+using ::android::hardware::wifi::supplicant::V1_2::DppAkm;
+using ::android::hardware::wifi::supplicant::V1_2::DppFailureCode;
+using ::android::hardware::wifi::supplicant::V1_2::DppProgressCode;
+using ::android::hardware::wifi::supplicant::V1_3::ISupplicantStaIface;
+using ::android::hardware::wifi::supplicant::V1_3::ISupplicantStaIfaceCallback;
+using ::android::hardware::wifi::supplicant::V1_3::ISupplicantStaNetwork;
+
+class SupplicantStaIfaceHidlTest : public ::testing::VtsHalHidlTargetTestBase {
+ public:
+ virtual void SetUp() override {
+ startSupplicantAndWaitForHidlService();
+ EXPECT_TRUE(turnOnExcessiveLogging());
+ sta_iface_ = getSupplicantStaIface_1_3();
+ ASSERT_NE(sta_iface_.get(), nullptr);
+ }
+
+ virtual void TearDown() override { stopSupplicant(); }
+
+ int64_t pmkCacheExpirationTimeInSec;
+ std::vector<uint8_t> serializedPmkCacheEntry;
+
+ protected:
+ // ISupplicantStaIface object used for all tests in this fixture.
+ sp<ISupplicantStaIface> sta_iface_;
+};
+
+class IfaceCallback : public ISupplicantStaIfaceCallback {
+ Return<void> onNetworkAdded(uint32_t /* id */) override { return Void(); }
+ Return<void> onNetworkRemoved(uint32_t /* id */) override { return Void(); }
+ Return<void> onStateChanged(
+ ISupplicantStaIfaceCallback::State /* newState */,
+ const hidl_array<uint8_t, 6>& /*bssid */, uint32_t /* id */,
+ const hidl_vec<uint8_t>& /* ssid */) override {
+ return Void();
+ }
+ Return<void> onAnqpQueryDone(
+ const hidl_array<uint8_t, 6>& /* bssid */,
+ const ISupplicantStaIfaceCallback::AnqpData& /* data */,
+ const ISupplicantStaIfaceCallback::Hs20AnqpData& /* hs20Data */)
+ override {
+ return Void();
+ }
+ virtual Return<void> onHs20IconQueryDone(
+ const hidl_array<uint8_t, 6>& /* bssid */,
+ const hidl_string& /* fileName */,
+ const hidl_vec<uint8_t>& /* data */) override {
+ return Void();
+ }
+ virtual Return<void> onHs20SubscriptionRemediation(
+ const hidl_array<uint8_t, 6>& /* bssid */,
+ ISupplicantStaIfaceCallback::OsuMethod /* osuMethod */,
+ const hidl_string& /* url*/) override {
+ return Void();
+ }
+ Return<void> onHs20DeauthImminentNotice(
+ const hidl_array<uint8_t, 6>& /* bssid */, uint32_t /* reasonCode */,
+ uint32_t /* reAuthDelayInSec */,
+ const hidl_string& /* url */) override {
+ return Void();
+ }
+ Return<void> onDisconnected(const hidl_array<uint8_t, 6>& /* bssid */,
+ bool /* locallyGenerated */,
+ ISupplicantStaIfaceCallback::ReasonCode
+ /* reasonCode */) override {
+ return Void();
+ }
+ Return<void> onAssociationRejected(
+ const hidl_array<uint8_t, 6>& /* bssid */,
+ ISupplicantStaIfaceCallback::StatusCode /* statusCode */,
+ bool /*timedOut */) override {
+ return Void();
+ }
+ Return<void> onAuthenticationTimeout(
+ const hidl_array<uint8_t, 6>& /* bssid */) override {
+ return Void();
+ }
+ Return<void> onBssidChanged(
+ ISupplicantStaIfaceCallback::BssidChangeReason /* reason */,
+ const hidl_array<uint8_t, 6>& /* bssid */) override {
+ return Void();
+ }
+ Return<void> onEapFailure() override { return Void(); }
+ Return<void> onEapFailure_1_1(
+ ISupplicantStaIfaceCallback::EapErrorCode /* eapErrorCode */) override {
+ return Void();
+ }
+ Return<void> onWpsEventSuccess() override { return Void(); }
+ Return<void> onWpsEventFail(
+ const hidl_array<uint8_t, 6>& /* bssid */,
+ ISupplicantStaIfaceCallback::WpsConfigError /* configError */,
+ ISupplicantStaIfaceCallback::WpsErrorIndication /* errorInd */)
+ override {
+ return Void();
+ }
+ Return<void> onWpsEventPbcOverlap() override { return Void(); }
+ Return<void> onExtRadioWorkStart(uint32_t /* id */) override {
+ return Void();
+ }
+ Return<void> onExtRadioWorkTimeout(uint32_t /* id*/) override {
+ return Void();
+ }
+ Return<void> onDppSuccessConfigReceived(
+ const hidl_vec<uint8_t>& /* ssid */, const hidl_string& /* password */,
+ const hidl_array<uint8_t, 32>& /* psk */,
+ DppAkm /* securityAkm */) override {
+ return Void();
+ }
+ Return<void> onDppSuccessConfigSent() override { return Void(); }
+ Return<void> onDppProgress(DppProgressCode /* code */) override {
+ return Void();
+ }
+ Return<void> onDppFailure(DppFailureCode /* code */) override {
+ return Void();
+ }
+ Return<void> onPmkCacheAdded(
+ int64_t /* expirationTimeInSec */,
+ const hidl_vec<uint8_t>& /* serializedEntry */) override {
+ return Void();
+ }
+};
+
+class IfacePmkCacheCallback : public IfaceCallback {
+ SupplicantStaIfaceHidlTest& parent_;
+ Return<void> onPmkCacheAdded(
+ int64_t expirationTimeInSec,
+ const hidl_vec<uint8_t>& serializedEntry) override {
+ parent_.pmkCacheExpirationTimeInSec = expirationTimeInSec;
+ parent_.serializedPmkCacheEntry = serializedEntry;
+ return Void();
+ }
+
+ public:
+ IfacePmkCacheCallback(SupplicantStaIfaceHidlTest& parent)
+ : parent_(parent) {}
+};
+
+/*
+ * RegisterCallback_1_3
+ */
+TEST_F(SupplicantStaIfaceHidlTest, RegisterCallback_1_3) {
+ sta_iface_->registerCallback_1_3(
+ new IfaceCallback(), [](const SupplicantStatus& status) {
+ EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
+ });
+}
diff --git a/wifi/supplicant/1.3/vts/functional/supplicant_sta_network_hidl_test.cpp b/wifi/supplicant/1.3/vts/functional/supplicant_sta_network_hidl_test.cpp
index e5be0cc..07bc9d8 100644
--- a/wifi/supplicant/1.3/vts/functional/supplicant_sta_network_hidl_test.cpp
+++ b/wifi/supplicant/1.3/vts/functional/supplicant_sta_network_hidl_test.cpp
@@ -71,3 +71,16 @@
EXPECT_EQ(testOcspType, ocspType);
});
}
+
+/*
+ * SetPmkCacheEntry
+ */
+TEST_F(SupplicantStaNetworkHidlTest, SetPmkCache) {
+ uint8_t bytes[128] = {0};
+ std::vector<uint8_t> serializedEntry(bytes, bytes + sizeof(bytes));
+
+ sta_network_->setPmkCache(
+ serializedEntry, [](const SupplicantStatus &status) {
+ EXPECT_EQ(SupplicantStatusCode::SUCCESS, status.code);
+ });
+}