Merge "Add EVS 1.1 to current compatibility matrix" into rvc-dev
diff --git a/camera/metadata/3.5/types.hal b/camera/metadata/3.5/types.hal
index 4e2252c..d32bc91 100644
--- a/camera/metadata/3.5/types.hal
+++ b/camera/metadata/3.5/types.hal
@@ -35,28 +35,29 @@
* '/system/media/camera/docs/docs.html' in the corresponding Android source tree.</p>
*/
enum CameraMetadataTag : @3.4::CameraMetadataTag {
- /** android.control.availableBokehMaxSizes [static, int32[], ndk_public]
+ /** android.control.availableExtendedSceneModeMaxSizes [static, int32[], ndk_public]
*
- * <p>The list of bokeh modes for ANDROID_CONTROL_BOKEH_MODE that are supported by this camera
- * device, and each bokeh mode's maximum streaming (non-stall) size with bokeh effect.</p>
+ * <p>The list of extended scene modes for ANDROID_CONTROL_EXTENDED_SCENE_MODE that are supported
+ * by this camera device, and each extended scene mode's maximum streaming (non-stall) size
+ * with effect.</p>
*
- * @see ANDROID_CONTROL_BOKEH_MODE
+ * @see ANDROID_CONTROL_EXTENDED_SCENE_MODE
*/
- ANDROID_CONTROL_AVAILABLE_BOKEH_MAX_SIZES = android.hardware.camera.metadata@3.3::CameraMetadataTag:ANDROID_CONTROL_END_3_3,
+ ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES = android.hardware.camera.metadata@3.3::CameraMetadataTag:ANDROID_CONTROL_END_3_3,
- /** android.control.availableBokehZoomRatioRanges [static, float[], ndk_public]
+ /** android.control.availableExtendedSceneModeZoomRatioRanges [static, float[], ndk_public]
*
- * <p>The ranges of supported zoom ratio for non-OFF ANDROID_CONTROL_BOKEH_MODE.</p>
+ * <p>The ranges of supported zoom ratio for non-DISABLED ANDROID_CONTROL_EXTENDED_SCENE_MODE.</p>
*
- * @see ANDROID_CONTROL_BOKEH_MODE
+ * @see ANDROID_CONTROL_EXTENDED_SCENE_MODE
*/
- ANDROID_CONTROL_AVAILABLE_BOKEH_ZOOM_RATIO_RANGES,
+ ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_ZOOM_RATIO_RANGES,
- /** android.control.bokehMode [dynamic, enum, public]
+ /** android.control.extendedSceneMode [dynamic, enum, public]
*
- * <p>Whether bokeh mode is enabled for a particular capture request.</p>
+ * <p>Whether extended scene mode is enabled for a particular capture request.</p>
*/
- ANDROID_CONTROL_BOKEH_MODE,
+ ANDROID_CONTROL_EXTENDED_SCENE_MODE,
/** android.control.zoomRatioRange [static, float[], public]
*
@@ -95,13 +96,22 @@
* Enumeration definitions for the various entries that need them
*/
-/** android.control.bokehMode enumeration values
- * @see ANDROID_CONTROL_BOKEH_MODE
+/** android.control.mode enumeration values added since v3.2
+ * @see ANDROID_CONTROL_MODE
*/
-enum CameraMetadataEnumAndroidControlBokehMode : uint32_t {
- ANDROID_CONTROL_BOKEH_MODE_OFF,
- ANDROID_CONTROL_BOKEH_MODE_STILL_CAPTURE,
- ANDROID_CONTROL_BOKEH_MODE_CONTINUOUS,
+enum CameraMetadataEnumAndroidControlMode :
+ @3.2::CameraMetadataEnumAndroidControlMode {
+ ANDROID_CONTROL_MODE_USE_EXTENDED_SCENE_MODE,
+};
+
+/** android.control.extendedSceneMode enumeration values
+ * @see ANDROID_CONTROL_EXTENDED_SCENE_MODE
+ */
+enum CameraMetadataEnumAndroidControlExtendedSceneMode : uint32_t {
+ ANDROID_CONTROL_EXTENDED_SCENE_MODE_DISABLED = 0,
+ ANDROID_CONTROL_EXTENDED_SCENE_MODE_BOKEH_STILL_CAPTURE,
+ ANDROID_CONTROL_EXTENDED_SCENE_MODE_BOKEH_CONTINUOUS,
+ ANDROID_CONTROL_EXTENDED_SCENE_MODE_VENDOR_START = 0x40,
};
/** android.lens.poseReference enumeration values added since v3.3
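For context, the new ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES static key is a flat int32 array of (mode, maxWidth, maxHeight) triples, and each non-DISABLED mode pairs with a (min, max) entry in ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_ZOOM_RATIO_RANGES; the VTS change below walks the array in steps of three on that assumption. The following parsing sketch is illustrative only and not part of this change; the helper name, struct, and header path are assumptions.

#include <system/camera_metadata.h>

#include <vector>

struct ExtendedSceneModeMaxSize {
    int32_t mode;      // DISABLED, BOKEH_STILL_CAPTURE, BOKEH_CONTINUOUS, or a vendor mode
    int32_t maxWidth;  // 0x0 is expected for DISABLED
    int32_t maxHeight;
};

// Parse the (mode, maxWidth, maxHeight) triples advertised by a camera's static metadata.
static std::vector<ExtendedSceneModeMaxSize> parseExtendedSceneModeMaxSizes(
        const camera_metadata_t* staticMeta) {
    std::vector<ExtendedSceneModeMaxSize> result;
    camera_metadata_ro_entry entry;
    int ret = find_camera_metadata_ro_entry(
            staticMeta, ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES, &entry);
    if (ret != 0 || entry.count == 0 || entry.count % 3 != 0) {
        return result;  // Key absent or malformed: treat as "extended scene mode unsupported".
    }
    for (size_t i = 0; i < entry.count; i += 3) {
        result.push_back({entry.data.i32[i], entry.data.i32[i + 1], entry.data.i32[i + 2]});
    }
    return result;
}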
diff --git a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
index c9d76da..05b8b47 100644
--- a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
+++ b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
@@ -784,7 +784,7 @@
const CameraMetadata& chars, int deviceVersion,
const hidl_vec<hidl_string>& deviceNames);
void verifyCameraCharacteristics(Status status, const CameraMetadata& chars);
- void verifyBokehCharacteristics(const camera_metadata_t* metadata);
+ void verifyExtendedSceneModeCharacteristics(const camera_metadata_t* metadata);
void verifyZoomCharacteristics(const camera_metadata_t* metadata);
void verifyRecommendedConfigs(const CameraMetadata& metadata);
void verifyMonochromeCharacteristics(const CameraMetadata& chars, int deviceVersion);
@@ -6588,77 +6588,98 @@
poseReference >= ANDROID_LENS_POSE_REFERENCE_PRIMARY_CAMERA);
}
- verifyBokehCharacteristics(metadata);
+ verifyExtendedSceneModeCharacteristics(metadata);
verifyZoomCharacteristics(metadata);
}
-void CameraHidlTest::verifyBokehCharacteristics(const camera_metadata_t* metadata) {
+void CameraHidlTest::verifyExtendedSceneModeCharacteristics(const camera_metadata_t* metadata) {
camera_metadata_ro_entry entry;
int retcode = 0;
+ retcode = find_camera_metadata_ro_entry(metadata, ANDROID_CONTROL_AVAILABLE_MODES, &entry);
+ if ((0 == retcode) && (entry.count > 0)) {
+ for (size_t i = 0; i < entry.count; i++) {
+ ASSERT_TRUE(entry.data.u8[i] >= ANDROID_CONTROL_MODE_OFF &&
+ entry.data.u8[i] <= ANDROID_CONTROL_MODE_USE_EXTENDED_SCENE_MODE);
+ }
+ } else {
+ ADD_FAILURE() << "Get camera controlAvailableModes failed!";
+ }
+
// Check key availability in capabilities, request and result.
retcode = find_camera_metadata_ro_entry(metadata,
ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, &entry);
- bool hasBokehRequestKey = false;
+ bool hasExtendedSceneModeRequestKey = false;
if ((0 == retcode) && (entry.count > 0)) {
- hasBokehRequestKey = std::find(entry.data.i32, entry.data.i32+entry.count,
- ANDROID_CONTROL_BOKEH_MODE) != entry.data.i32+entry.count;
+ hasExtendedSceneModeRequestKey =
+ std::find(entry.data.i32, entry.data.i32 + entry.count,
+ ANDROID_CONTROL_EXTENDED_SCENE_MODE) != entry.data.i32 + entry.count;
} else {
ADD_FAILURE() << "Get camera availableRequestKeys failed!";
}
retcode = find_camera_metadata_ro_entry(metadata,
ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, &entry);
- bool hasBokehResultKey = false;
+ bool hasExtendedSceneModeResultKey = false;
if ((0 == retcode) && (entry.count > 0)) {
- hasBokehResultKey = std::find(entry.data.i32, entry.data.i32+entry.count,
- ANDROID_CONTROL_BOKEH_MODE) != entry.data.i32+entry.count;
+ hasExtendedSceneModeResultKey =
+ std::find(entry.data.i32, entry.data.i32 + entry.count,
+ ANDROID_CONTROL_EXTENDED_SCENE_MODE) != entry.data.i32 + entry.count;
} else {
ADD_FAILURE() << "Get camera availableResultKeys failed!";
}
retcode = find_camera_metadata_ro_entry(metadata,
ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, &entry);
- bool hasBokehMaxSizesKey = false;
- bool hasBokehZoomRatioRangesKey = false;
+ bool hasExtendedSceneModeMaxSizesKey = false;
+ bool hasExtendedSceneModeZoomRatioRangesKey = false;
if ((0 == retcode) && (entry.count > 0)) {
- hasBokehMaxSizesKey = std::find(entry.data.i32, entry.data.i32+entry.count,
- ANDROID_CONTROL_AVAILABLE_BOKEH_MAX_SIZES) != entry.data.i32+entry.count;
- hasBokehZoomRatioRangesKey = std::find(entry.data.i32, entry.data.i32+entry.count,
- ANDROID_CONTROL_AVAILABLE_BOKEH_ZOOM_RATIO_RANGES) != entry.data.i32+entry.count;
+ hasExtendedSceneModeMaxSizesKey =
+ std::find(entry.data.i32, entry.data.i32 + entry.count,
+ ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES) !=
+ entry.data.i32 + entry.count;
+ hasExtendedSceneModeZoomRatioRangesKey =
+ std::find(entry.data.i32, entry.data.i32 + entry.count,
+ ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_ZOOM_RATIO_RANGES) !=
+ entry.data.i32 + entry.count;
} else {
ADD_FAILURE() << "Get camera availableCharacteristicsKeys failed!";
}
camera_metadata_ro_entry maxSizesEntry;
- retcode = find_camera_metadata_ro_entry(metadata,
- ANDROID_CONTROL_AVAILABLE_BOKEH_MAX_SIZES, &maxSizesEntry);
- bool hasBokehMaxSizes = (0 == retcode && maxSizesEntry.count > 0);
+ retcode = find_camera_metadata_ro_entry(
+ metadata, ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES, &maxSizesEntry);
+ bool hasExtendedSceneModeMaxSizes = (0 == retcode && maxSizesEntry.count > 0);
camera_metadata_ro_entry zoomRatioRangesEntry;
- retcode = find_camera_metadata_ro_entry(metadata,
- ANDROID_CONTROL_AVAILABLE_BOKEH_ZOOM_RATIO_RANGES, &zoomRatioRangesEntry);
- bool hasBokehZoomRatioRanges = (0 == retcode && zoomRatioRangesEntry.count > 0);
+ retcode = find_camera_metadata_ro_entry(
+ metadata, ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_ZOOM_RATIO_RANGES,
+ &zoomRatioRangesEntry);
+ bool hasExtendedSceneModeZoomRatioRanges = (0 == retcode && zoomRatioRangesEntry.count > 0);
- // Bokeh keys must all be available, or all be unavailable.
- bool noBokeh = !hasBokehRequestKey && !hasBokehResultKey && !hasBokehMaxSizesKey &&
- !hasBokehZoomRatioRangesKey && !hasBokehMaxSizes && !hasBokehZoomRatioRanges;
- if (noBokeh) {
+ // Extended scene mode keys must all be available, or all be unavailable.
+ bool noExtendedSceneMode =
+ !hasExtendedSceneModeRequestKey && !hasExtendedSceneModeResultKey &&
+ !hasExtendedSceneModeMaxSizesKey && !hasExtendedSceneModeZoomRatioRangesKey &&
+ !hasExtendedSceneModeMaxSizes && !hasExtendedSceneModeZoomRatioRanges;
+ if (noExtendedSceneMode) {
return;
}
- bool hasBokeh = hasBokehRequestKey && hasBokehResultKey && hasBokehMaxSizesKey &&
- hasBokehZoomRatioRangesKey && hasBokehMaxSizes && hasBokehZoomRatioRanges;
- ASSERT_TRUE(hasBokeh);
+ bool hasExtendedSceneMode = hasExtendedSceneModeRequestKey && hasExtendedSceneModeResultKey &&
+ hasExtendedSceneModeMaxSizesKey &&
+ hasExtendedSceneModeZoomRatioRangesKey &&
+ hasExtendedSceneModeMaxSizes && hasExtendedSceneModeZoomRatioRanges;
+ ASSERT_TRUE(hasExtendedSceneMode);
- // Must have OFF, and must have one of STILL_CAPTURE and CONTINUOUS.
- // Only valid combinations: {OFF, CONTINUOUS}, {OFF, STILL_CAPTURE}, and
- // {OFF, CONTINUOUS, STILL_CAPTURE}.
+ // Must have DISABLED, and must have one of BOKEH_STILL_CAPTURE, BOKEH_CONTINUOUS, or a VENDOR
+ // mode.
ASSERT_TRUE((maxSizesEntry.count == 6 && zoomRatioRangesEntry.count == 2) ||
(maxSizesEntry.count == 9 && zoomRatioRangesEntry.count == 4));
- bool hasOffMode = false;
- bool hasStillCaptureMode = false;
- bool hasContinuousMode = false;
+ bool hasDisabledMode = false;
+ bool hasBokehStillCaptureMode = false;
+ bool hasBokehContinuousMode = false;
+ bool hasVendorMode = false;
std::vector<AvailableStream> outputStreams;
ASSERT_EQ(Status::OK, getAvailableOutputStreams(metadata, outputStreams));
for (int i = 0, j = 0; i < maxSizesEntry.count && j < zoomRatioRangesEntry.count; i += 3) {
@@ -6666,24 +6687,29 @@
int32_t maxWidth = maxSizesEntry.data.i32[i+1];
int32_t maxHeight = maxSizesEntry.data.i32[i+2];
switch (mode) {
- case ANDROID_CONTROL_BOKEH_MODE_OFF:
- hasOffMode = true;
+ case ANDROID_CONTROL_EXTENDED_SCENE_MODE_DISABLED:
+ hasDisabledMode = true;
ASSERT_TRUE(maxWidth == 0 && maxHeight == 0);
break;
- case ANDROID_CONTROL_BOKEH_MODE_STILL_CAPTURE:
- hasStillCaptureMode = true;
+ case ANDROID_CONTROL_EXTENDED_SCENE_MODE_BOKEH_STILL_CAPTURE:
+ hasBokehStillCaptureMode = true;
j += 2;
break;
- case ANDROID_CONTROL_BOKEH_MODE_CONTINUOUS:
- hasContinuousMode = true;
+ case ANDROID_CONTROL_EXTENDED_SCENE_MODE_BOKEH_CONTINUOUS:
+ hasBokehContinuousMode = true;
j += 2;
break;
default:
- ADD_FAILURE() << "Invalid bokehMode advertised: " << mode;
+ if (mode < ANDROID_CONTROL_EXTENDED_SCENE_MODE_VENDOR_START) {
+ ADD_FAILURE() << "Invalid extended scene mode advertised: " << mode;
+ } else {
+ hasVendorMode = true;
+ j += 2;
+ }
break;
}
- if (mode != ANDROID_CONTROL_BOKEH_MODE_OFF) {
+ if (mode != ANDROID_CONTROL_EXTENDED_SCENE_MODE_DISABLED) {
// Make sure size is supported.
bool sizeSupported = false;
for (const auto& stream : outputStreams) {
@@ -6703,8 +6729,8 @@
ASSERT_LE(minZoomRatio, maxZoomRatio);
}
}
- ASSERT_TRUE(hasOffMode);
- ASSERT_TRUE(hasStillCaptureMode || hasContinuousMode);
+ ASSERT_TRUE(hasDisabledMode);
+ ASSERT_TRUE(hasBokehStillCaptureMode || hasBokehContinuousMode || hasVendorMode);
}
void CameraHidlTest::verifyZoomCharacteristics(const camera_metadata_t* metadata) {
diff --git a/camera/provider/2.6/ICameraProvider.hal b/camera/provider/2.6/ICameraProvider.hal
index 5651550..ed1d31d 100644
--- a/camera/provider/2.6/ICameraProvider.hal
+++ b/camera/provider/2.6/ICameraProvider.hal
@@ -76,6 +76,13 @@
* configuration settings exposed through camera metadata), should the sum
* of resource costs for the combination be <= 100.
*
+ * The lists of camera id combinations returned by this method may contain
+ * hidden physical camera ids. If a combination does contain hidden physical
+ * camera ids, the camera framework must be able to open any logical cameras
+ * that contain these hidden physical camera ids in their
+ * ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS list, in addition to the other
+ * camera ids advertised in the combination, for concurrent operation.
+ *
* @return status Status code for the operation
* @return cameraIds a list of camera id combinations that support
* concurrent stream configurations with the minimum guarantees
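Put differently: any id in a returned combination that is not an advertised (public) camera id is a hidden physical camera id, and the framework must be able to open, concurrently with the rest of the combination, every logical camera whose ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS list contains it. A minimal sketch of that resolution step, purely illustrative and not part of this change (the function and parameter names are assumptions):

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <vector>

// Given one combination returned by the provider, the set of publicly advertised camera
// ids, and each logical camera's physical-id list, compute which cameras the framework
// must be able to open concurrently.
std::set<std::string> camerasToOpenConcurrently(
        const std::vector<std::string>& combination,
        const std::set<std::string>& publicCameraIds,
        const std::map<std::string, std::vector<std::string>>& physicalIdsOfLogicalCamera) {
    std::set<std::string> toOpen;
    for (const std::string& id : combination) {
        if (publicCameraIds.count(id) > 0) {
            toOpen.insert(id);  // Directly advertised camera id.
            continue;
        }
        // Hidden physical camera id: every logical camera listing it must be openable.
        for (const auto& [logicalId, physicalIds] : physicalIdsOfLogicalCamera) {
            if (std::find(physicalIds.begin(), physicalIds.end(), id) != physicalIds.end()) {
                toOpen.insert(logicalId);
            }
        }
    }
    return toOpen;
}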
diff --git a/compatibility_matrices/Android.bp b/compatibility_matrices/Android.bp
index 7883dd9..33157a6 100644
--- a/compatibility_matrices/Android.bp
+++ b/compatibility_matrices/Android.bp
@@ -83,8 +83,8 @@
"compatibility_matrix.current.xml",
],
kernel_configs: [
- "kernel_config_current_4.14",
- "kernel_config_current_4.19",
- "kernel_config_current_5.4",
+ "kernel_config_r_4.14",
+ "kernel_config_r_4.19",
+ "kernel_config_r_5.4",
]
}
diff --git a/compatibility_matrices/compatibility_matrix.4.xml b/compatibility_matrices/compatibility_matrix.4.xml
index e5e012c..01ec172 100644
--- a/compatibility_matrices/compatibility_matrix.4.xml
+++ b/compatibility_matrices/compatibility_matrix.4.xml
@@ -181,6 +181,12 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.gnss</name>
+ <!--
+ - Both versions are listed here as a workaround for libvintf since 2.0 extends 1.1.
+ - Devices launched with Q must support gnss@2.0, see VtsTrebleVendorVintfTest
+ - test DeviceManifestTest#GnssHalVersionCompatibility.
+ -->
+ <version>1.1</version>
<version>2.0</version>
<interface>
<name>IGnss</name>
@@ -423,6 +429,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.thermal</name>
+ <version>1.0-1</version>
<version>2.0</version>
<interface>
<name>IThermal</name>
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index a1705de..3d201b5 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -209,6 +209,12 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.gnss</name>
+ <!--
+ - Both versions are listed here as a workaround for libvintf since 2.0 extends 1.1.
+ - Devices launched with Q must support gnss@2.0, see VtsTrebleVendorVintfTest
+ - test DeviceManifestTest#GnssHalVersionCompatibility.
+ -->
+ <version>1.1</version>
<version>2.0-1</version>
<interface>
<name>IGnss</name>
@@ -464,6 +470,7 @@
</hal>
<hal format="hidl" optional="true">
<name>android.hardware.thermal</name>
+ <version>1.0</version>
<version>2.0</version>
<interface>
<name>IThermal</name>
diff --git a/current.txt b/current.txt
index 5d862d8..47b7206 100644
--- a/current.txt
+++ b/current.txt
@@ -641,7 +641,7 @@
74049a402be913963edfdd80828a53736570e9d8124a1bf18166b6ed46a6b0ab android.hardware.boot@1.1::types
b8c63679e1a3874b356f3e691aecce1191d38f59063cf2ed2dce8a9d4cabf00e android.hardware.camera.device@3.6::ICameraDevice
a35d5151b48505f06a775b38c0e2e265f80a845d92802324c643565807f81c53 android.hardware.camera.device@3.6::types
-99aae72ae75a8ddf63ccffbc585f3a55f4d0847b4adff3d7a0f8bd9e2305bec1 android.hardware.camera.provider@2.6::ICameraProvider
+21086e1c7a2acc0ebe0ff8561b11f3c2009be687a92d79b608a5f00b16c5f598 android.hardware.camera.provider@2.6::ICameraProvider
8f8d9463508ff9cae88eb35c429fd0e2dbca0ca8f5de7fdf836cc0c4370becb6 android.hardware.camera.provider@2.6::ICameraProviderCallback
c1aa508d00b66ed5feefea398fd5edf28fa651ac89773adad7dfda4e0a73a952 android.hardware.cas@1.2::ICas
9811f867def49b420d8c707f7e38d3bdd64f835244e1d2a5e9762ab9835672dc android.hardware.cas@1.2::ICasListener
@@ -681,7 +681,7 @@
b454df853441c12f6e425e8a60dd29fda20f5e6e39b93d1103e4b37495db38aa android.hardware.radio@1.5::IRadio
fcbb0742a88215ee7a6d7ce0825d253eb2b50391fc6c8c48667f9fd7f6d4549e android.hardware.radio@1.5::IRadioIndication
b809193970a91ca637a4b0184767315601d32e3ef3d5992ffbc7a8d14a14f015 android.hardware.radio@1.5::IRadioResponse
-e7669bddacbdaee2cd9a87762a13fb7648639eead54bf4d767dc06eaaeb35736 android.hardware.radio@1.5::types
+6b8dcd5e3e33a524cc7ebb14671a76ad3a2d333467397ce82acc4024346386f8 android.hardware.radio@1.5::types
3ca6616381080bdd6c08141ad12775a94ae868c58b02b1274ae3326f7de724ab android.hardware.sensors@2.1::ISensors
3d4141c6373cd9ca02fe221a7d12343840de2255d032c38248fe8e35816b58b2 android.hardware.sensors@2.1::ISensorsCallback
8051cc50fc90ed447f058a8b15d81f35a65f1bd9004b1de4f127edeb89b47978 android.hardware.sensors@2.1::types
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index e28605d..4ab228f 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -125,7 +125,9 @@
// Test driver for those generated from ml/nn/runtime/test/spec
void Execute(const sp<IDevice>& device, const TestModel& testModel) {
const Model model = createModel(testModel);
- const Request request = createRequest(testModel);
+
+ ExecutionContext context;
+ const Request request = context.createRequest(testModel);
// Create IPreparedModel.
sp<IPreparedModel> preparedModel;
@@ -143,7 +145,7 @@
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// Retrieve execution results.
- const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
// We want "close-enough" results.
checkResults(testModel, outputs);
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 0dba85a..3613e69 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -21,10 +21,13 @@
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware_buffer.h>
#include <android/hidl/allocator/1.0/IAllocator.h>
#include <android/hidl/memory/1.0/IMemory.h>
#include <hidlmemory/mapping.h>
+#include <vndk/hardware_buffer.h>
+#include <gtest/gtest.h>
#include <algorithm>
#include <iostream>
#include <vector>
@@ -37,10 +40,64 @@
using V1_0::Request;
using V1_0::RequestArgument;
-constexpr uint32_t kInputPoolIndex = 0;
-constexpr uint32_t kOutputPoolIndex = 1;
+std::unique_ptr<TestAshmem> TestAshmem::create(uint32_t size) {
+ auto ashmem = std::make_unique<TestAshmem>(size);
+ return ashmem->mIsValid ? std::move(ashmem) : nullptr;
+}
-Request createRequest(const TestModel& testModel) {
+void TestAshmem::initialize(uint32_t size) {
+ mIsValid = false;
+ ASSERT_GT(size, 0);
+ mHidlMemory = nn::allocateSharedMemory(size);
+ ASSERT_TRUE(mHidlMemory.valid());
+ mMappedMemory = mapMemory(mHidlMemory);
+ ASSERT_NE(mMappedMemory, nullptr);
+ mPtr = static_cast<uint8_t*>(static_cast<void*>(mMappedMemory->getPointer()));
+ ASSERT_NE(mPtr, nullptr);
+ mIsValid = true;
+}
+
+std::unique_ptr<TestBlobAHWB> TestBlobAHWB::create(uint32_t size) {
+ auto ahwb = std::make_unique<TestBlobAHWB>(size);
+ return ahwb->mIsValid ? std::move(ahwb) : nullptr;
+}
+
+void TestBlobAHWB::initialize(uint32_t size) {
+ mIsValid = false;
+ ASSERT_GT(size, 0);
+ const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+ const AHardwareBuffer_Desc desc = {
+ .width = size,
+ .height = 1,
+ .layers = 1,
+ .format = AHARDWAREBUFFER_FORMAT_BLOB,
+ .usage = usage,
+ .stride = size,
+ };
+ ASSERT_EQ(AHardwareBuffer_allocate(&desc, &mAhwb), 0);
+ ASSERT_NE(mAhwb, nullptr);
+
+ void* buffer = nullptr;
+ ASSERT_EQ(AHardwareBuffer_lock(mAhwb, usage, -1, nullptr, &buffer), 0);
+ ASSERT_NE(buffer, nullptr);
+ mPtr = static_cast<uint8_t*>(buffer);
+
+ const native_handle_t* handle = AHardwareBuffer_getNativeHandle(mAhwb);
+ ASSERT_NE(handle, nullptr);
+ mHidlMemory = hidl_memory("hardware_buffer_blob", handle, desc.width);
+ mIsValid = true;
+}
+
+TestBlobAHWB::~TestBlobAHWB() {
+ if (mAhwb) {
+ AHardwareBuffer_unlock(mAhwb, nullptr);
+ AHardwareBuffer_release(mAhwb);
+ }
+}
+
+Request ExecutionContext::createRequest(const TestModel& testModel, MemoryType memoryType) {
+ CHECK(memoryType == MemoryType::ASHMEM || memoryType == MemoryType::BLOB_AHWB);
+
// Model inputs.
hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size());
size_t inputSize = 0;
@@ -80,16 +137,19 @@
}
// Allocate memory pools.
- hidl_vec<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
- nn::allocateSharedMemory(outputSize)};
- CHECK_NE(pools[kInputPoolIndex].size(), 0u);
- CHECK_NE(pools[kOutputPoolIndex].size(), 0u);
- sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex]);
- CHECK(inputMemory.get() != nullptr);
- uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
- CHECK(inputPtr != nullptr);
+ if (memoryType == MemoryType::ASHMEM) {
+ mInputMemory = TestAshmem::create(inputSize);
+ mOutputMemory = TestAshmem::create(outputSize);
+ } else {
+ mInputMemory = TestBlobAHWB::create(inputSize);
+ mOutputMemory = TestBlobAHWB::create(outputSize);
+ }
+ EXPECT_NE(mInputMemory, nullptr);
+ EXPECT_NE(mOutputMemory, nullptr);
+ hidl_vec<hidl_memory> pools = {mInputMemory->getHidlMemory(), mOutputMemory->getHidlMemory()};
// Copy input data to the memory pool.
+ uint8_t* inputPtr = mInputMemory->getPointer();
for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
if (op.data.size() > 0) {
@@ -102,18 +162,13 @@
return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
}
-std::vector<TestBuffer> getOutputBuffers(const Request& request) {
- sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex]);
- CHECK(outputMemory.get() != nullptr);
- uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
- CHECK(outputPtr != nullptr);
-
+std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& request) const {
// Copy out output results.
+ uint8_t* outputPtr = mOutputMemory->getPointer();
std::vector<TestBuffer> outputBuffers;
for (const auto& output : request.outputs) {
outputBuffers.emplace_back(output.location.length, outputPtr + output.location.offset);
}
-
return outputBuffers;
}
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index cb22250..7f7dac0 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -129,7 +129,8 @@
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
- const Request request = createRequest(kTestModel);
+ ExecutionContext context;
+ const Request request = context.createRequest(kTestModel);
ASSERT_FALSE(kTestModel.expectFailure);
validateEverything(kDevice, model, request);
}
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
index 6d4534c..3292f79 100644
--- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -19,6 +19,8 @@
#include <android-base/logging.h>
#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware_buffer.h>
+#include <android/hidl/memory/1.0/IMemory.h>
#include <algorithm>
#include <iosfwd>
#include <string>
@@ -28,11 +30,73 @@
namespace android::hardware::neuralnetworks {
-// Create HIDL Request from the TestModel struct.
-V1_0::Request createRequest(const test_helper::TestModel& testModel);
+// Convenience class to manage the lifetime of memory resources.
+class TestMemoryBase {
+ DISALLOW_COPY_AND_ASSIGN(TestMemoryBase);
-// After execution, copy out output results from the output memory pool.
-std::vector<::test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request);
+ public:
+ TestMemoryBase() = default;
+ virtual ~TestMemoryBase() = default;
+ uint8_t* getPointer() const { return mPtr; }
+ hidl_memory getHidlMemory() const { return mHidlMemory; }
+
+ protected:
+ uint8_t* mPtr = nullptr;
+ hidl_memory mHidlMemory;
+ bool mIsValid = false;
+};
+
+class TestAshmem : public TestMemoryBase {
+ public:
+ static std::unique_ptr<TestAshmem> create(uint32_t size);
+
+ // Prefer TestAshmem::create.
+ // The constructor calls initialize, which constructs the memory resources. This is a workaround
+ // because gtest macros cannot be used directly in a constructor.
+ TestAshmem(uint32_t size) { initialize(size); }
+
+ private:
+ void initialize(uint32_t size);
+ sp<hidl::memory::V1_0::IMemory> mMappedMemory;
+};
+
+class TestBlobAHWB : public TestMemoryBase {
+ public:
+ static std::unique_ptr<TestBlobAHWB> create(uint32_t size);
+
+ // Prefer TestBlobAHWB::create.
+ // The constructor calls initialize, which constructs the memory resources. This is a
+ // workaround because gtest macros cannot be used directly in a constructor.
+ TestBlobAHWB(uint32_t size) { initialize(size); }
+ ~TestBlobAHWB();
+
+ private:
+ void initialize(uint32_t size);
+ AHardwareBuffer* mAhwb = nullptr;
+};
+
+enum class MemoryType { ASHMEM, BLOB_AHWB, DEVICE };
+
+// Manages the lifetime of memory resources used in an execution.
+class ExecutionContext {
+ DISALLOW_COPY_AND_ASSIGN(ExecutionContext);
+
+ public:
+ static constexpr uint32_t kInputPoolIndex = 0;
+ static constexpr uint32_t kOutputPoolIndex = 1;
+
+ ExecutionContext() = default;
+
+ // Create HIDL Request from the TestModel struct.
+ V1_0::Request createRequest(const test_helper::TestModel& testModel,
+ MemoryType memoryType = MemoryType::ASHMEM);
+
+ // After execution, copy out output results from the output memory pool.
+ std::vector<test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request) const;
+
+ private:
+ std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
+};
// Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
// so this is efficiently accomplished by moving the element to the end and
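The ExecutionContext above replaces the free createRequest()/getOutputBuffers() helpers so that the ashmem or BLOB AHardwareBuffer backing the request pools stays alive and mapped across the execution. A minimal usage sketch, mirroring how the generated-test harnesses below use it; the include path and the wrapper function are assumptions, and the execution step itself is elided:

#include <vector>

#include "1.0/Utils.h"  // assumed path for the header edited above

using namespace android::hardware::neuralnetworks;

void runWithBlobAhwbPools(const test_helper::TestModel& testModel) {
    ExecutionContext context;
    // Back the input/output pools with a BLOB-format AHardwareBuffer instead of ashmem.
    const V1_0::Request request = context.createRequest(testModel, MemoryType::BLOB_AHWB);
    // ... prepare the model and execute `request` exactly as the harness does ...
    // Outputs are read back through the same context, which owns the memory mappings.
    const std::vector<test_helper::TestBuffer> outputs = context.getOutputBuffers(request);
    (void)outputs;
}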
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
index cee15a3..14d300d 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
@@ -133,7 +133,9 @@
// Test driver for those generated from ml/nn/runtime/test/spec
void Execute(const sp<IDevice>& device, const TestModel& testModel) {
const Model model = createModel(testModel);
- const Request request = createRequest(testModel);
+
+ ExecutionContext context;
+ const Request request = context.createRequest(testModel);
// Create IPreparedModel.
sp<IPreparedModel> preparedModel;
@@ -151,7 +153,7 @@
ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
// Retrieve execution results.
- const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
// We want "close-enough" results.
checkResults(testModel, outputs);
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index d56d40b..04af6ec 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -132,7 +132,8 @@
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
- const Request request = createRequest(kTestModel);
+ ExecutionContext context;
+ const Request request = context.createRequest(kTestModel);
ASSERT_FALSE(kTestModel.expectFailure);
validateEverything(kDevice, model, request);
}
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index 3ab0135..aaaafc7 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -68,6 +68,7 @@
Executor executor;
MeasureTiming measureTiming;
OutputType outputType;
+ MemoryType memoryType;
};
} // namespace
@@ -216,7 +217,8 @@
return;
}
- Request request = createRequest(testModel);
+ ExecutionContext context;
+ Request request = context.createRequest(testModel, testConfig.memoryType);
if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
@@ -326,7 +328,7 @@
}
// Retrieve execution results.
- const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
// We want "close-enough" results.
checkResults(testModel, outputs);
@@ -337,24 +339,30 @@
std::vector<OutputType> outputTypesList;
std::vector<MeasureTiming> measureTimingList;
std::vector<Executor> executorList;
+ std::vector<MemoryType> memoryTypeList;
if (testDynamicOutputShape) {
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ memoryTypeList = {MemoryType::ASHMEM};
} else {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ memoryTypeList = {MemoryType::ASHMEM, MemoryType::BLOB_AHWB};
}
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
- const TestConfig testConfig = {.executor = executor,
- .measureTiming = measureTiming,
- .outputType = outputType};
- EvaluatePreparedModel(preparedModel, testModel, testConfig);
+ for (const MemoryType memoryType : memoryTypeList) {
+ const TestConfig testConfig = {.executor = executor,
+ .measureTiming = measureTiming,
+ .outputType = outputType,
+ .memoryType = memoryType};
+ EvaluatePreparedModel(preparedModel, testModel, testConfig);
+ }
}
}
}
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 4fbd0e2..5853fa4 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -153,7 +153,8 @@
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
- const Request request = createRequest(kTestModel);
+ ExecutionContext context;
+ const Request request = context.createRequest(kTestModel);
if (kTestModel.expectFailure) {
validateFailure(kDevice, model, request);
} else {
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index ff21960..ff71778 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -74,17 +74,8 @@
enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT, MISSED_DEADLINE };
-enum class MemoryType { SHARED, DEVICE };
-
enum class IOType { INPUT, OUTPUT };
-static void waitForSyncFence(int syncFd) {
- constexpr int kInfiniteTimeout = -1;
- ASSERT_GT(syncFd, 0);
- int r = sync_wait(syncFd, kInfiniteTimeout);
- ASSERT_GE(r, 0);
-}
-
struct TestConfig {
Executor executor;
MeasureTiming measureTiming;
@@ -275,6 +266,13 @@
} // namespace
+void waitForSyncFence(int syncFd) {
+ constexpr int kInfiniteTimeout = -1;
+ ASSERT_GT(syncFd, 0);
+ int r = sync_wait(syncFd, kInfiniteTimeout);
+ ASSERT_GE(r, 0);
+}
+
Model createModel(const TestModel& testModel) {
uint32_t constCopySize = 0;
uint32_t constRefSize = 0;
@@ -336,21 +334,39 @@
}
}
-constexpr uint32_t kInputPoolIndex = 0;
-constexpr uint32_t kOutputPoolIndex = 1;
-constexpr uint32_t kDeviceMemoryBeginIndex = 2;
+class ExecutionContextV1_3 {
+ public:
+ ExecutionContextV1_3(sp<IDevice> device, sp<IPreparedModel> preparedModel)
+ : kDevice(std::move(device)), kPreparedModel(std::move(preparedModel)) {}
-static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
- const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
- const TestModel& testModel, bool preferDeviceMemory) {
+ std::optional<Request> createRequest(const TestModel& testModel, MemoryType memoryType);
+ std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel,
+ const Request& request) const;
+
+ private:
+ // Get a TestBuffer with data copied from an IBuffer object.
+ void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBuffer) const;
+
+ static constexpr uint32_t kInputPoolIndex = 0;
+ static constexpr uint32_t kOutputPoolIndex = 1;
+ static constexpr uint32_t kDeviceMemoryBeginIndex = 2;
+
+ const sp<IDevice> kDevice;
+ const sp<IPreparedModel> kPreparedModel;
+ std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
+ std::vector<sp<IBuffer>> mBuffers;
+};
+
+std::optional<Request> ExecutionContextV1_3::createRequest(const TestModel& testModel,
+ MemoryType memoryType) {
// Memory pools are organized as:
// - 0: Input shared memory pool
// - 1: Output shared memory pool
// - [2, 2+i): Input device memories
// - [2+i, 2+i+o): Output device memories
- DeviceMemoryAllocator allocator(device, preparedModel, testModel);
- std::vector<sp<IBuffer>> buffers;
+ DeviceMemoryAllocator allocator(kDevice, kPreparedModel, testModel);
std::vector<uint32_t> tokens;
+ mBuffers.clear();
// Model inputs.
hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size());
@@ -361,13 +377,13 @@
// Omitted input.
inputs[i] = {.hasNoValue = true};
continue;
- } else if (preferDeviceMemory) {
+ } else if (memoryType == MemoryType::DEVICE) {
SCOPED_TRACE("Input index = " + std::to_string(i));
auto [buffer, token] = allocator.allocate<IOType::INPUT>(i);
if (buffer != nullptr) {
- DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
+ DataLocation loc = {.poolIndex = static_cast<uint32_t>(mBuffers.size() +
kDeviceMemoryBeginIndex)};
- buffers.push_back(std::move(buffer));
+ mBuffers.push_back(std::move(buffer));
tokens.push_back(token);
inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
continue;
@@ -387,13 +403,13 @@
size_t outputSize = 0;
for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
- if (preferDeviceMemory) {
+ if (memoryType == MemoryType::DEVICE) {
SCOPED_TRACE("Output index = " + std::to_string(i));
auto [buffer, token] = allocator.allocate<IOType::OUTPUT>(i);
if (buffer != nullptr) {
- DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
+ DataLocation loc = {.poolIndex = static_cast<uint32_t>(mBuffers.size() +
kDeviceMemoryBeginIndex)};
- buffers.push_back(std::move(buffer));
+ mBuffers.push_back(std::move(buffer));
tokens.push_back(token);
outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
continue;
@@ -416,21 +432,29 @@
outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
}
+ if (memoryType == MemoryType::DEVICE && mBuffers.empty()) {
+ return std::nullopt;
+ }
+
// Memory pools.
- hidl_vec<Request::MemoryPool> pools(kDeviceMemoryBeginIndex + buffers.size());
- pools[kInputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(inputSize, 1)));
- pools[kOutputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(outputSize, 1)));
- CHECK_NE(pools[kInputPoolIndex].hidlMemory().size(), 0u);
- CHECK_NE(pools[kOutputPoolIndex].hidlMemory().size(), 0u);
- for (uint32_t i = 0; i < buffers.size(); i++) {
+ hidl_vec<Request::MemoryPool> pools(kDeviceMemoryBeginIndex + mBuffers.size());
+ if (memoryType == MemoryType::BLOB_AHWB) {
+ mInputMemory = TestBlobAHWB::create(std::max<size_t>(inputSize, 1));
+ mOutputMemory = TestBlobAHWB::create(std::max<size_t>(outputSize, 1));
+ } else {
+ mInputMemory = TestAshmem::create(std::max<size_t>(inputSize, 1));
+ mOutputMemory = TestAshmem::create(std::max<size_t>(outputSize, 1));
+ }
+ EXPECT_NE(mInputMemory, nullptr);
+ EXPECT_NE(mOutputMemory, nullptr);
+ pools[kInputPoolIndex].hidlMemory(mInputMemory->getHidlMemory());
+ pools[kOutputPoolIndex].hidlMemory(mOutputMemory->getHidlMemory());
+ for (uint32_t i = 0; i < mBuffers.size(); i++) {
pools[kDeviceMemoryBeginIndex + i].token(tokens[i]);
}
// Copy input data to the input shared memory pool.
- sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex].hidlMemory());
- CHECK(inputMemory.get() != nullptr);
- uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
- CHECK(inputPtr != nullptr);
+ uint8_t* inputPtr = mInputMemory->getPointer();
for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
if (!inputs[i].hasNoValue && inputs[i].location.poolIndex == kInputPoolIndex) {
const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
@@ -439,14 +463,38 @@
std::copy(begin, end, inputPtr + inputs[i].location.offset);
}
}
-
- Request request = {
+ return Request{
.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
- return {std::move(request), std::move(buffers)};
+}
+
+std::vector<TestBuffer> ExecutionContextV1_3::getOutputBuffers(const TestModel& testModel,
+ const Request& request) const {
+ // Copy out output results.
+ uint8_t* outputPtr = mOutputMemory->getPointer();
+ std::vector<TestBuffer> outputBuffers;
+ for (uint32_t i = 0; i < request.outputs.size(); i++) {
+ const auto& outputLoc = request.outputs[i].location;
+ if (outputLoc.poolIndex == kOutputPoolIndex) {
+ outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
+ } else {
+ const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
+ if (op.data.size() == 0) {
+ outputBuffers.emplace_back(0, nullptr);
+ } else {
+ SCOPED_TRACE("Output index = " + std::to_string(i));
+ const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
+ TestBuffer buffer;
+ getBuffer(mBuffers[bufferIndex], op.data.size(), &buffer);
+ outputBuffers.push_back(std::move(buffer));
+ }
+ }
+ }
+ return outputBuffers;
}
// Get a TestBuffer with data copied from an IBuffer object.
-static void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBuffer) {
+void ExecutionContextV1_3::getBuffer(const sp<IBuffer>& buffer, size_t size,
+ TestBuffer* testBuffer) const {
// IBuffer -> Shared memory.
hidl_memory tmp = nn::allocateSharedMemory(size);
const auto ret = buffer->copyTo(tmp);
@@ -462,35 +510,6 @@
*testBuffer = TestBuffer(size, outputPtr);
}
-static std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel, const Request& request,
- const std::vector<sp<IBuffer>>& buffers) {
- sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex].hidlMemory());
- CHECK(outputMemory.get() != nullptr);
- uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
- CHECK(outputPtr != nullptr);
-
- // Copy out output results.
- std::vector<TestBuffer> outputBuffers;
- for (uint32_t i = 0; i < request.outputs.size(); i++) {
- const auto& outputLoc = request.outputs[i].location;
- if (outputLoc.poolIndex == kOutputPoolIndex) {
- outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
- } else {
- const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
- if (op.data.size() == 0) {
- outputBuffers.emplace_back();
- } else {
- SCOPED_TRACE("Output index = " + std::to_string(i));
- const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
- TestBuffer buffer;
- getBuffer(buffers[bufferIndex], op.data.size(), &buffer);
- outputBuffers.push_back(std::move(buffer));
- }
- }
- }
- return outputBuffers;
-}
-
static bool hasZeroSizedOutput(const TestModel& testModel) {
return std::any_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
[&testModel](uint32_t index) {
@@ -541,13 +560,14 @@
return;
}
- auto [request, buffers] =
- createRequest(device, preparedModel, testModel,
- /*preferDeviceMemory=*/testConfig.memoryType == MemoryType::DEVICE);
+ ExecutionContextV1_3 context(device, preparedModel);
+ auto maybeRequest = context.createRequest(testModel, testConfig.memoryType);
// Skip if testing memory domain but no device memory has been allocated.
- if (testConfig.memoryType == MemoryType::DEVICE && buffers.empty()) {
+ if (!maybeRequest.has_value()) {
return;
}
+
+ Request request = std::move(maybeRequest.value());
if (testConfig.outputType == OutputType::INSUFFICIENT) {
makeOutputInsufficientSize(/*outputIndex=*/0, &request);
}
@@ -742,7 +762,7 @@
}
// Retrieve execution results.
- const std::vector<TestBuffer> outputs = getOutputBuffers(testModel, request, buffers);
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);
// We want "close-enough" results.
checkResults(testModel, outputs);
@@ -753,29 +773,32 @@
std::vector<OutputType> outputTypesList;
std::vector<MeasureTiming> measureTimingList;
std::vector<Executor> executorList;
- MemoryType memoryType = MemoryType::SHARED;
+ std::vector<MemoryType> memoryTypeList;
switch (testKind) {
case TestKind::GENERAL: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+ memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::DYNAMIC_SHAPE: {
outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED};
+ memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::MEMORY_DOMAIN: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO};
executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
- memoryType = MemoryType::DEVICE;
+ memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
} break;
case TestKind::FENCED_COMPUTE: {
outputTypesList = {OutputType::FULLY_SPECIFIED};
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
executorList = {Executor::FENCED};
+ memoryTypeList = {MemoryType::ASHMEM};
} break;
case TestKind::QUANTIZATION_COUPLING: {
LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
@@ -786,14 +809,17 @@
measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
// Burst does not support V1_3 loop timeout.
executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
+ memoryTypeList = {MemoryType::ASHMEM};
} break;
}
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
- const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
- EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+ for (const MemoryType memoryType : memoryTypeList) {
+ const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
+ EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+ }
}
}
}
@@ -812,7 +838,7 @@
for (const OutputType outputType : outputTypesList) {
for (const MeasureTiming measureTiming : measureTimingList) {
for (const Executor executor : executorList) {
- const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::SHARED,
+ const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::ASHMEM,
/*reportSkipping=*/false);
bool baseSkipped = false;
EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
index 834d335..38d6486 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -77,6 +77,8 @@
void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
const test_helper::TestModel& testModel, TestKind testKind);
+void waitForSyncFence(int syncFd);
+
} // namespace android::hardware::neuralnetworks::V1_3::vts::functional
#endif // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.3/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/1.3/vts/functional/MemoryDomainTests.cpp
index 08c1b35..3c0c885 100644
--- a/neuralnetworks/1.3/vts/functional/MemoryDomainTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/MemoryDomainTests.cpp
@@ -864,6 +864,9 @@
case Executor::SYNC:
EXPECT_EQ(executeSync(preparedModel, request), expectedStatus);
break;
+ case Executor::FENCED:
+ EXPECT_EQ(executeFenced(preparedModel, request), expectedStatus);
+ break;
default:
ASSERT_TRUE(false);
}
@@ -912,7 +915,38 @@
return executionStatus;
}
- // TODO(xusongw): Add executeFenced.
+ ErrorStatus executeFenced(const sp<IPreparedModel>& preparedModel, const Request& request) {
+ ErrorStatus executionStatus;
+ hidl_handle syncFenceHandle;
+ sp<IFencedExecutionCallback> fencedCallback;
+ const auto callbackFunc = [&executionStatus, &syncFenceHandle, &fencedCallback](
+ ErrorStatus error, const hidl_handle& handle,
+ const sp<IFencedExecutionCallback>& callback) {
+ executionStatus = error;
+ syncFenceHandle = handle;
+ fencedCallback = callback;
+ };
+ Return<void> ret = preparedModel->executeFenced(request, {}, MeasureTiming::NO, {}, {}, {},
+ callbackFunc);
+ EXPECT_TRUE(ret.isOk());
+ if (executionStatus != ErrorStatus::NONE) {
+ EXPECT_EQ(syncFenceHandle.getNativeHandle(), nullptr);
+ EXPECT_EQ(fencedCallback, nullptr);
+ return executionStatus;
+ }
+ if (syncFenceHandle.getNativeHandle()) {
+ waitForSyncFence(syncFenceHandle.getNativeHandle()->data[0]);
+ }
+ EXPECT_NE(fencedCallback, nullptr);
+ ret = fencedCallback->getExecutionInfo(
+ [&executionStatus](ErrorStatus error, Timing t, Timing) {
+ executionStatus = error;
+ EXPECT_EQ(UINT64_MAX, t.timeOnDevice);
+ EXPECT_EQ(UINT64_MAX, t.timeInDriver);
+ });
+ EXPECT_TRUE(ret.isOk());
+ return executionStatus;
+ }
const Executor kExecutor = std::get<Executor>(GetParam());
};
@@ -1111,6 +1145,9 @@
}
TEST_P(MemoryDomainExecutionTest, InvalidDimensions) {
+ // FENCED execution does not support dynamic shape.
+ if (kExecutor == Executor::FENCED) return;
+
TestOperand testOperand = kTestOperand;
testOperand.dimensions[0] = 0;
auto preparedModel = createConvPreparedModel(testOperand);
@@ -1148,7 +1185,7 @@
ErrorStatus::GENERAL_FAILURE);
}
-const auto kExecutorChoices = testing::Values(Executor::ASYNC, Executor::SYNC);
+const auto kExecutorChoices = testing::Values(Executor::ASYNC, Executor::SYNC, Executor::FENCED);
std::string printMemoryDomainExecutionTest(
const testing::TestParamInfo<MemoryDomainExecutionTestParam>& info) {
diff --git a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
index 879989e..2ef1e8f 100644
--- a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
@@ -214,7 +214,8 @@
}
void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- const Request& request, bool synchronous, DeadlineBoundType deadlineBound) {
+ const Request& request, const ExecutionContext& context, bool synchronous,
+ DeadlineBoundType deadlineBound) {
const ExecutionFunction execute = synchronous ? executeSynchronously : executeAsynchronously;
const auto deadline = makeDeadline(deadlineBound);
@@ -261,7 +262,7 @@
// Retrieve execution results.
ASSERT_TRUE(nn::compliantWithV1_0(request));
const V1_0::Request request10 = nn::convertToV1_0(request);
- const std::vector<TestBuffer> outputs = getOutputBuffers(request10);
+ const std::vector<TestBuffer> outputs = context.getOutputBuffers(request10);
// We want "close-enough" results.
if (status == ErrorStatus::NONE) {
@@ -270,10 +271,11 @@
}
void runExecutionTests(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
- const Request& request) {
+ const Request& request, const ExecutionContext& context) {
for (bool synchronous : {false, true}) {
for (auto deadlineBound : deadlineBounds) {
- runExecutionTest(preparedModel, testModel, request, synchronous, deadlineBound);
+ runExecutionTest(preparedModel, testModel, request, context, synchronous,
+ deadlineBound);
}
}
}
@@ -291,8 +293,9 @@
if (preparedModel == nullptr) return;
// run execution tests
- const Request request = nn::convertToV1_3(createRequest(testModel));
- runExecutionTests(preparedModel, testModel, request);
+ ExecutionContext context;
+ const Request request = nn::convertToV1_3(context.createRequest(testModel));
+ runExecutionTests(preparedModel, testModel, request, context);
}
class DeadlineTest : public GeneratedTestBase {};
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 60ceb7e..f7bd624 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -177,7 +177,8 @@
TEST_P(ValidationTest, Test) {
const Model model = createModel(kTestModel);
- const Request request = nn::convertToV1_3(createRequest(kTestModel));
+ ExecutionContext context;
+ const Request request = nn::convertToV1_3(context.createRequest(kTestModel));
if (kTestModel.expectFailure) {
validateFailure(kDevice, model, request);
} else {
diff --git a/radio/1.5/types.hal b/radio/1.5/types.hal
index 45f3b90..248f56e 100644
--- a/radio/1.5/types.hal
+++ b/radio/1.5/types.hal
@@ -1023,16 +1023,37 @@
* 3GPP2 C.S0068-0.
*/
enum PersoSubstate : @1.0::PersoSubstate {
+ /**
+ * The device is personalized using the content of the Service Provider Name (SPN) in the SIM
+ * card.
+ */
SIM_SPN,
SIM_SPN_PUK,
- /** Equivalent Home PLMN */
+ /**
+ * Service Provider and Equivalent Home PLMN
+ * The device is personalized using both the content of the GID1 (equivalent to service provider
+ * personalization) and the content of the Equivalent Home PLMN (EHPLMN) in the SIM card.
+ * If the GID1 in the SIM is absent, then just the content of the Equivalent Home PLMN
+ * is matched.
+ */
SIM_SP_EHPLMN,
SIM_SP_EHPLMN_PUK,
+ /**
+ * Device is personalized using the first digits of the ICCID of the SIM card.
+ */
SIM_ICCID,
SIM_ICCID_PUK,
+ /**
+ * Device is personalized using the content of the IMPI in the ISIM.
+ */
SIM_IMPI,
SIM_IMPI_PUK,
- /** Network subset service provider */
+ /**
+ * Network Subset and Service Provider
+ * Device is personalized using both the content of GID1 (equivalent to service provider
+ * personalization) and the first digits of the IMSI (equivalent to network subset
+ * personalization).
+ */
SIM_NS_SP,
SIM_NS_SP_PUK,
};