Merge "graphics common: fix AIDL BufferUsage calculation" into rvc-dev
diff --git a/audio/core/all-versions/default/StreamOut.cpp b/audio/core/all-versions/default/StreamOut.cpp
index 150d641..1519c48 100644
--- a/audio/core/all-versions/default/StreamOut.cpp
+++ b/audio/core/all-versions/default/StreamOut.cpp
@@ -455,20 +455,22 @@
     sp<IStreamOutCallback> callback = self->mCallback;
     if (callback.get() == nullptr) return 0;
     ALOGV("asyncCallback() event %d", event);
+    Return<void> result;
     switch (event) {
         case STREAM_CBK_EVENT_WRITE_READY:
-            callback->onWriteReady();
+            result = callback->onWriteReady();
             break;
         case STREAM_CBK_EVENT_DRAIN_READY:
-            callback->onDrainReady();
+            result = callback->onDrainReady();
             break;
         case STREAM_CBK_EVENT_ERROR:
-            callback->onError();
+            result = callback->onError();
             break;
         default:
             ALOGW("asyncCallback() unknown event %d", event);
             break;
     }
+    ALOGW_IF(!result.isOk(), "Client callback failed: %s", result.description().c_str());
     return 0;
 }
 
@@ -629,16 +631,18 @@
     sp<IStreamOutEventCallback> eventCallback = self->mEventCallback;
     if (eventCallback.get() == nullptr) return 0;
     ALOGV("%s event %d", __func__, event);
+    Return<void> result;
     switch (event) {
         case STREAM_EVENT_CBK_TYPE_CODEC_FORMAT_CHANGED: {
             hidl_vec<uint8_t> audioMetadata;
             audioMetadata.setToExternal((uint8_t*)param, strlen((char*)param));
-            eventCallback->onCodecFormatChanged(audioMetadata);
+            result = eventCallback->onCodecFormatChanged(audioMetadata);
         } break;
         default:
             ALOGW("%s unknown event %d", __func__, event);
             break;
     }
+    ALOGW_IF(!result.isOk(), "Client callback failed: %s", result.description().c_str());
     return 0;
 }
 #endif
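The StreamOut.cpp hunks above capture the Return<void> produced by the HIDL client callbacks so transport failures (for example a dead client process) get logged instead of silently dropped. A minimal sketch of the same pattern, assuming libhidlbase and liblog are available; invokeAndLog is a hypothetical helper, not part of the HAL:

#include <hidl/Status.h>
#include <log/log.h>

using ::android::hardware::Return;

// Invokes a functor that returns Return<void> (e.g. callback->onWriteReady()) and logs a
// warning if the transaction failed, mirroring the check added to asyncCallback() above.
template <typename Call>
int invokeAndLog(Call&& call) {
    Return<void> result = call();
    ALOGW_IF(!result.isOk(), "Client callback failed: %s", result.description().c_str());
    return 0;
}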
diff --git a/audio/effect/all-versions/vts/functional/ValidateAudioEffectsConfiguration.cpp b/audio/effect/all-versions/vts/functional/ValidateAudioEffectsConfiguration.cpp
index f9e4aa3..9c0135b 100644
--- a/audio/effect/all-versions/vts/functional/ValidateAudioEffectsConfiguration.cpp
+++ b/audio/effect/all-versions/vts/functional/ValidateAudioEffectsConfiguration.cpp
@@ -18,6 +18,12 @@
 #include <iterator>
 
 #include <media/EffectsConfig.h>
+// clang-format off
+#include PATH(android/hardware/audio/effect/FILE_VERSION/IEffectsFactory.h)
+// clang-format on
+
+#include <gtest/gtest.h>
+#include <hidl/ServiceManagement.h>
 
 #include "utility/ValidateXml.h"
 
@@ -29,6 +35,11 @@
     RecordProperty("description",
                    "Verify that the effects configuration file is valid according to the schema");
     using namespace android::effectsConfig;
+    if (android::hardware::getAllHalInstanceNames(
+                ::android::hardware::audio::effect::CPP_VERSION::IEffectsFactory::descriptor)
+                .size() == 0) {
+        GTEST_SKIP() << "No Effects HAL version " STRINGIFY(CPP_VERSION) " on this device";
+    }
 
     std::vector<const char*> locations(std::begin(DEFAULT_LOCATIONS), std::end(DEFAULT_LOCATIONS));
     const char* xsd = "/data/local/tmp/audio_effects_conf_" STRINGIFY(CPP_VERSION) ".xsd";
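The hunk above makes the schema test skip cleanly when no IEffectsFactory instance is registered, instead of failing on devices that ship without that Effects HAL version. A standalone sketch of the guard, with the versioned descriptor written out as an assumed literal (the real test assembles it from the FILE_VERSION/CPP_VERSION macros):

#include <gtest/gtest.h>
#include <hidl/ServiceManagement.h>

TEST(AudioEffectsConfigSketch, SkipWhenHalAbsent) {
    // Assumed descriptor string; the test above derives it from the versioned macros.
    const auto instances = ::android::hardware::getAllHalInstanceNames(
            "android.hardware.audio.effect@6.0::IEffectsFactory");
    if (instances.empty()) {
        GTEST_SKIP() << "No Effects HAL on this device";
    }
    // ... the XML schema validation would run here ...
}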
diff --git a/audio/effect/all-versions/vts/functional/VtsHalAudioEffectTargetTest.cpp b/audio/effect/all-versions/vts/functional/VtsHalAudioEffectTargetTest.cpp
index 390d4ee..070242f 100644
--- a/audio/effect/all-versions/vts/functional/VtsHalAudioEffectTargetTest.cpp
+++ b/audio/effect/all-versions/vts/functional/VtsHalAudioEffectTargetTest.cpp
@@ -155,11 +155,20 @@
     0xfe3199be, 0xaed0, 0x413f, 0x87bb,
     std::array<uint8_t, 6>{{0x11, 0x26, 0x0e, 0xb6, 0x3c, 0xf1}}};
 
+enum { PARAM_FACTORY_NAME, PARAM_EFFECT_UUID };
+using EffectParameter = std::tuple<std::string, Uuid>;
+
+static inline std::string EffectParameterToString(
+        const ::testing::TestParamInfo<EffectParameter>& info) {
+    return ::android::hardware::PrintInstanceNameToString(::testing::TestParamInfo<std::string>{
+            std::get<PARAM_FACTORY_NAME>(info.param), info.index});
+}
+
 // The main test class for Audio Effect HIDL HAL.
-class AudioEffectHidlTest : public ::testing::TestWithParam<std::string> {
+class AudioEffectHidlTest : public ::testing::TestWithParam<EffectParameter> {
   public:
     void SetUp() override {
-        effectsFactory = IEffectsFactory::getService(GetParam());
+        effectsFactory = IEffectsFactory::getService(std::get<PARAM_FACTORY_NAME>(GetParam()));
         ASSERT_NE(nullptr, effectsFactory.get());
 
         findAndCreateEffect(getEffectType());
@@ -180,7 +189,7 @@
         RecordProperty("description", description);
     }
 
-    virtual Uuid getEffectType() { return EQUALIZER_EFFECT_TYPE; }
+    Uuid getEffectType() const { return std::get<PARAM_EFFECT_UUID>(GetParam()); }
 
     void findAndCreateEffect(const Uuid& type);
     void findEffectInstance(const Uuid& type, Uuid* uuid);
@@ -369,7 +378,9 @@
     description("Verify Disable -> Enable -> Disable sequence for an effect");
     Return<Result> ret = effect->disable();
     EXPECT_TRUE(ret.isOk());
-    EXPECT_EQ(Result::INVALID_ARGUMENTS, ret);
+    // Note: some legacy effects may return -EINVAL (INVALID_ARGUMENTS);
+    //       the more canonical result is -ENOSYS (NOT_SUPPORTED).
+    EXPECT_TRUE(ret == Result::NOT_SUPPORTED || ret == Result::INVALID_ARGUMENTS);
     ret = effect->enable();
     EXPECT_TRUE(ret.isOk());
     EXPECT_EQ(Result::OK, ret);
@@ -519,15 +530,19 @@
 
 // The main test class for Equalizer Audio Effect HIDL HAL.
 class EqualizerAudioEffectHidlTest : public AudioEffectHidlTest {
-   public:
+  public:
     void SetUp() override {
         AudioEffectHidlTest::SetUp();
         equalizer = IEqualizerEffect::castFrom(effect);
         ASSERT_NE(nullptr, equalizer.get());
     }
 
-   protected:
-    Uuid getEffectType() override { return EQUALIZER_EFFECT_TYPE; }
+    void TearDown() override {
+        equalizer.clear();
+        AudioEffectHidlTest::TearDown();
+    }
+
+  protected:
     void getNumBands(uint16_t* numBands);
     void getLevelRange(int16_t* minLevel, int16_t* maxLevel);
     void getBandFrequencyRange(uint16_t band, uint32_t* minFreq, uint32_t* centerFreq,
@@ -765,16 +780,19 @@
 
 // The main test class for Loudness Enhancer Audio Effect HIDL HAL.
 class LoudnessEnhancerAudioEffectHidlTest : public AudioEffectHidlTest {
-   public:
+  public:
     void SetUp() override {
         AudioEffectHidlTest::SetUp();
         enhancer = ILoudnessEnhancerEffect::castFrom(effect);
         ASSERT_NE(nullptr, enhancer.get());
     }
 
-   protected:
-    Uuid getEffectType() override { return LOUDNESS_ENHANCER_EFFECT_TYPE; }
+    void TearDown() override {
+        enhancer.clear();
+        AudioEffectHidlTest::TearDown();
+    }
 
+  protected:
     sp<ILoudnessEnhancerEffect> enhancer;
 };
 
@@ -799,19 +817,31 @@
     EXPECT_EQ(gain, actualGain);
 }
 
+INSTANTIATE_TEST_SUITE_P(EffectsFactory, AudioEffectsFactoryHidlTest,
+                         ::testing::ValuesIn(::android::hardware::getAllHalInstanceNames(
+                                 IEffectsFactory::descriptor)),
+                         ::android::hardware::PrintInstanceNameToString);
 INSTANTIATE_TEST_SUITE_P(
-        EffectsFactory, AudioEffectsFactoryHidlTest,
-        testing::ValuesIn(android::hardware::getAllHalInstanceNames(IEffectsFactory::descriptor)),
-        android::hardware::PrintInstanceNameToString);
+        Equalizer_IEffect, AudioEffectHidlTest,
+        ::testing::Combine(::testing::ValuesIn(::android::hardware::getAllHalInstanceNames(
+                                   IEffectsFactory::descriptor)),
+                           ::testing::Values(EQUALIZER_EFFECT_TYPE)),
+        EffectParameterToString);
 INSTANTIATE_TEST_SUITE_P(
-        Equalizer, AudioEffectHidlTest,
-        testing::ValuesIn(android::hardware::getAllHalInstanceNames(IEffectsFactory::descriptor)),
-        android::hardware::PrintInstanceNameToString);
+        LoudnessEnhancer_IEffect, AudioEffectHidlTest,
+        ::testing::Combine(::testing::ValuesIn(::android::hardware::getAllHalInstanceNames(
+                                   IEffectsFactory::descriptor)),
+                           ::testing::Values(LOUDNESS_ENHANCER_EFFECT_TYPE)),
+        EffectParameterToString);
 INSTANTIATE_TEST_SUITE_P(
         Equalizer, EqualizerAudioEffectHidlTest,
-        testing::ValuesIn(android::hardware::getAllHalInstanceNames(IEffectsFactory::descriptor)),
-        android::hardware::PrintInstanceNameToString);
+        ::testing::Combine(::testing::ValuesIn(::android::hardware::getAllHalInstanceNames(
+                                   IEffectsFactory::descriptor)),
+                           ::testing::Values(EQUALIZER_EFFECT_TYPE)),
+        EffectParameterToString);
 INSTANTIATE_TEST_SUITE_P(
         LoudnessEnhancer, LoudnessEnhancerAudioEffectHidlTest,
-        testing::ValuesIn(android::hardware::getAllHalInstanceNames(IEffectsFactory::descriptor)),
-        android::hardware::PrintInstanceNameToString);
+        ::testing::Combine(::testing::ValuesIn(::android::hardware::getAllHalInstanceNames(
+                                   IEffectsFactory::descriptor)),
+                           ::testing::Values(LOUDNESS_ENHANCER_EFFECT_TYPE)),
+        EffectParameterToString);
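The hunks above switch the effect suites from a plain service-name parameter to a (factory name, effect UUID) tuple built with ::testing::Combine, with EffectParameterToString generating the test names; since the UUID now rides in the parameter, derived suites no longer override getEffectType(). A self-contained gtest sketch of that shape, using a plain string and int in place of HAL instance names and UUIDs (all identifiers below are illustrative):

#include <gtest/gtest.h>

#include <string>
#include <tuple>

enum { PARAM_NAME, PARAM_VALUE };
using ExampleParameter = std::tuple<std::string, int>;

class CombineParamTest : public ::testing::TestWithParam<ExampleParameter> {};

TEST_P(CombineParamTest, CanReadBothFields) {
    EXPECT_FALSE(std::get<PARAM_NAME>(GetParam()).empty());
    EXPECT_GE(std::get<PARAM_VALUE>(GetParam()), 0);
}

// Custom name generator; generated test names may only contain [A-Za-z0-9_].
static std::string ExampleParameterToString(
        const ::testing::TestParamInfo<ExampleParameter>& info) {
    return std::get<PARAM_NAME>(info.param) + "_" +
           std::to_string(std::get<PARAM_VALUE>(info.param));
}

INSTANTIATE_TEST_SUITE_P(Example, CombineParamTest,
                         ::testing::Combine(::testing::Values(std::string("default")),
                                            ::testing::Values(0, 1)),
                         ExampleParameterToString);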
diff --git a/camera/device/3.6/default/ExternalCameraDeviceSession.cpp b/camera/device/3.6/default/ExternalCameraDeviceSession.cpp
index 60a1a10..8fd8e58 100644
--- a/camera/device/3.6/default/ExternalCameraDeviceSession.cpp
+++ b/camera/device/3.6/default/ExternalCameraDeviceSession.cpp
@@ -272,16 +272,17 @@
 
     // convert hal requests to offline request
     std::deque<std::shared_ptr<HalRequest>> offlineReqs(halReqs.size());
+    size_t i = 0;
     for (auto& v4lReq : halReqs) {
-        std::shared_ptr<HalRequest> halReq = std::make_shared<HalRequest>();
-        halReq->frameNumber = v4lReq->frameNumber;
-        halReq->setting = v4lReq->setting;
-        halReq->shutterTs = v4lReq->shutterTs;
-        halReq->buffers = v4lReq->buffers;
+        offlineReqs[i] = std::make_shared<HalRequest>();
+        offlineReqs[i]->frameNumber = v4lReq->frameNumber;
+        offlineReqs[i]->setting = v4lReq->setting;
+        offlineReqs[i]->shutterTs = v4lReq->shutterTs;
+        offlineReqs[i]->buffers = v4lReq->buffers;
         sp<V3_4::implementation::V4L2Frame> v4l2Frame =
                 static_cast<V3_4::implementation::V4L2Frame*>(v4lReq->frameIn.get());
-        halReq->frameIn = new AllocatedV4L2Frame(v4l2Frame);
-        offlineReqs.push_back(halReq);
+        offlineReqs[i]->frameIn = new AllocatedV4L2Frame(v4l2Frame);
+        i++;
         // enqueue V4L2 frame
         enqueueV4l2Frame(v4l2Frame);
     }
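The hunk above fixes a sizing bug: offlineReqs was constructed with halReqs.size() empty entries, and the loop then push_back'ed each converted request after them, leaving null requests at the front of the deque. A standard-library-only sketch (illustrative types) of the pitfall and the index-based fix:

#include <deque>
#include <iostream>
#include <memory>

int main() {
    // Buggy shape: pre-sized container + push_back => 2N elements, first N are null.
    std::deque<std::shared_ptr<int>> buggy(3);
    buggy.push_back(std::make_shared<int>(42));
    std::cout << "buggy: size=" << buggy.size()
              << " front=" << (buggy.front() ? "set" : "null") << '\n';  // size=4 front=null

    // Fixed shape (as in the hunk above): fill the pre-sized slots by index.
    std::deque<std::shared_ptr<int>> fixed(3);
    for (size_t i = 0; i < fixed.size(); ++i) {
        fixed[i] = std::make_shared<int>(static_cast<int>(i));
    }
    std::cout << "fixed: size=" << fixed.size() << '\n';  // size=3, all entries set
    return 0;
}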
diff --git a/camera/metadata/3.5/types.hal b/camera/metadata/3.5/types.hal
index 4e2252c..99d6115 100644
--- a/camera/metadata/3.5/types.hal
+++ b/camera/metadata/3.5/types.hal
@@ -35,28 +35,29 @@
  * '/system/media/camera/docs/docs.html' in the corresponding Android source tree.</p>
  */
 enum CameraMetadataTag : @3.4::CameraMetadataTag {
-    /** android.control.availableBokehMaxSizes [static, int32[], ndk_public]
+    /** android.control.availableExtendedSceneModeMaxSizes [static, int32[], ndk_public]
      *
-     * <p>The list of bokeh modes for ANDROID_CONTROL_BOKEH_MODE that are supported by this camera
-     * device, and each bokeh mode's maximum streaming (non-stall) size with bokeh effect.</p>
+     * <p>The list of extended scene modes for ANDROID_CONTROL_EXTENDED_SCENE_MODE that are supported
+     * by this camera device, and each extended scene mode's maximum streaming (non-stall) size
+     * with effect.</p>
      *
-     * @see ANDROID_CONTROL_BOKEH_MODE
+     * @see ANDROID_CONTROL_EXTENDED_SCENE_MODE
      */
-    ANDROID_CONTROL_AVAILABLE_BOKEH_MAX_SIZES = android.hardware.camera.metadata@3.3::CameraMetadataTag:ANDROID_CONTROL_END_3_3,
+    ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES = android.hardware.camera.metadata@3.3::CameraMetadataTag:ANDROID_CONTROL_END_3_3,
 
-    /** android.control.availableBokehZoomRatioRanges [static, float[], ndk_public]
+    /** android.control.availableExtendedSceneModeZoomRatioRanges [static, float[], ndk_public]
      *
-     * <p>The ranges of supported zoom ratio for non-OFF ANDROID_CONTROL_BOKEH_MODE.</p>
+     * <p>The ranges of supported zoom ratio for non-DISABLED ANDROID_CONTROL_EXTENDED_SCENE_MODE.</p>
      *
-     * @see ANDROID_CONTROL_BOKEH_MODE
+     * @see ANDROID_CONTROL_EXTENDED_SCENE_MODE
      */
-    ANDROID_CONTROL_AVAILABLE_BOKEH_ZOOM_RATIO_RANGES,
+    ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_ZOOM_RATIO_RANGES,
 
-    /** android.control.bokehMode [dynamic, enum, public]
+    /** android.control.extendedSceneMode [dynamic, enum, public]
      *
-     * <p>Whether bokeh mode is enabled for a particular capture request.</p>
+     * <p>Whether extended scene mode is enabled for a particular capture request.</p>
      */
-    ANDROID_CONTROL_BOKEH_MODE,
+    ANDROID_CONTROL_EXTENDED_SCENE_MODE,
 
     /** android.control.zoomRatioRange [static, float[], public]
      *
@@ -72,7 +73,7 @@
 
     ANDROID_CONTROL_END_3_5,
 
-    /** android.scaler.availableRotateAndCropModes [static, byte[], public]
+    /** android.scaler.availableRotateAndCropModes [static, byte[], hidden]
      *
      * <p>List of rotate-and-crop modes for ANDROID_SCALER_ROTATE_AND_CROP that are supported by this camera device.</p>
      *
@@ -80,7 +81,7 @@
      */
     ANDROID_SCALER_AVAILABLE_ROTATE_AND_CROP_MODES = android.hardware.camera.metadata@3.4::CameraMetadataTag:ANDROID_SCALER_END_3_4,
 
-    /** android.scaler.rotateAndCrop [dynamic, enum, public]
+    /** android.scaler.rotateAndCrop [dynamic, enum, hidden]
      *
      * <p>Whether a rotation-and-crop operation is applied to processed
      * outputs from the camera.</p>
@@ -95,13 +96,22 @@
  * Enumeration definitions for the various entries that need them
  */
 
-/** android.control.bokehMode enumeration values
- * @see ANDROID_CONTROL_BOKEH_MODE
+/** android.control.mode enumeration values added since v3.2
+ * @see ANDROID_CONTROL_MODE
  */
-enum CameraMetadataEnumAndroidControlBokehMode : uint32_t {
-    ANDROID_CONTROL_BOKEH_MODE_OFF,
-    ANDROID_CONTROL_BOKEH_MODE_STILL_CAPTURE,
-    ANDROID_CONTROL_BOKEH_MODE_CONTINUOUS,
+enum CameraMetadataEnumAndroidControlMode :
+        @3.2::CameraMetadataEnumAndroidControlMode {
+    ANDROID_CONTROL_MODE_USE_EXTENDED_SCENE_MODE,
+};
+
+/** android.control.extendedSceneMode enumeration values
+ * @see ANDROID_CONTROL_EXTENDED_SCENE_MODE
+ */
+enum CameraMetadataEnumAndroidControlExtendedSceneMode : uint32_t {
+    ANDROID_CONTROL_EXTENDED_SCENE_MODE_DISABLED                = 0,
+    ANDROID_CONTROL_EXTENDED_SCENE_MODE_BOKEH_STILL_CAPTURE,
+    ANDROID_CONTROL_EXTENDED_SCENE_MODE_BOKEH_CONTINUOUS,
+    ANDROID_CONTROL_EXTENDED_SCENE_MODE_VENDOR_START            = 0x40,
 };
 
 /** android.lens.poseReference enumeration values added since v3.3
diff --git a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
index c9d76da..05b8b47 100644
--- a/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
+++ b/camera/provider/2.4/vts/functional/VtsHalCameraProviderV2_4TargetTest.cpp
@@ -784,7 +784,7 @@
             const CameraMetadata& chars, int deviceVersion,
             const hidl_vec<hidl_string>& deviceNames);
     void verifyCameraCharacteristics(Status status, const CameraMetadata& chars);
-    void verifyBokehCharacteristics(const camera_metadata_t* metadata);
+    void verifyExtendedSceneModeCharacteristics(const camera_metadata_t* metadata);
     void verifyZoomCharacteristics(const camera_metadata_t* metadata);
     void verifyRecommendedConfigs(const CameraMetadata& metadata);
     void verifyMonochromeCharacteristics(const CameraMetadata& chars, int deviceVersion);
@@ -6588,77 +6588,98 @@
                 poseReference >= ANDROID_LENS_POSE_REFERENCE_PRIMARY_CAMERA);
     }
 
-    verifyBokehCharacteristics(metadata);
+    verifyExtendedSceneModeCharacteristics(metadata);
     verifyZoomCharacteristics(metadata);
 }
 
-void CameraHidlTest::verifyBokehCharacteristics(const camera_metadata_t* metadata) {
+void CameraHidlTest::verifyExtendedSceneModeCharacteristics(const camera_metadata_t* metadata) {
     camera_metadata_ro_entry entry;
     int retcode = 0;
 
+    retcode = find_camera_metadata_ro_entry(metadata, ANDROID_CONTROL_AVAILABLE_MODES, &entry);
+    if ((0 == retcode) && (entry.count > 0)) {
+        for (auto i = 0; i < entry.count; i++) {
+            ASSERT_TRUE(entry.data.u8[i] >= ANDROID_CONTROL_MODE_OFF &&
+                        entry.data.u8[i] <= ANDROID_CONTROL_MODE_USE_EXTENDED_SCENE_MODE);
+        }
+    } else {
+        ADD_FAILURE() << "Get camera controlAvailableModes failed!";
+    }
+
     // Check key availability in capabilities, request and result.
 
     retcode = find_camera_metadata_ro_entry(metadata,
             ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, &entry);
-    bool hasBokehRequestKey = false;
+    bool hasExtendedSceneModeRequestKey = false;
     if ((0 == retcode) && (entry.count > 0)) {
-        hasBokehRequestKey = std::find(entry.data.i32, entry.data.i32+entry.count,
-                ANDROID_CONTROL_BOKEH_MODE) != entry.data.i32+entry.count;
+        hasExtendedSceneModeRequestKey =
+                std::find(entry.data.i32, entry.data.i32 + entry.count,
+                          ANDROID_CONTROL_EXTENDED_SCENE_MODE) != entry.data.i32 + entry.count;
     } else {
         ADD_FAILURE() << "Get camera availableRequestKeys failed!";
     }
 
     retcode = find_camera_metadata_ro_entry(metadata,
             ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, &entry);
-    bool hasBokehResultKey = false;
+    bool hasExtendedSceneModeResultKey = false;
     if ((0 == retcode) && (entry.count > 0)) {
-        hasBokehResultKey = std::find(entry.data.i32, entry.data.i32+entry.count,
-                ANDROID_CONTROL_BOKEH_MODE) != entry.data.i32+entry.count;
+        hasExtendedSceneModeResultKey =
+                std::find(entry.data.i32, entry.data.i32 + entry.count,
+                          ANDROID_CONTROL_EXTENDED_SCENE_MODE) != entry.data.i32 + entry.count;
     } else {
         ADD_FAILURE() << "Get camera availableResultKeys failed!";
     }
 
     retcode = find_camera_metadata_ro_entry(metadata,
             ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, &entry);
-    bool hasBokehMaxSizesKey = false;
-    bool hasBokehZoomRatioRangesKey = false;
+    bool hasExtendedSceneModeMaxSizesKey = false;
+    bool hasExtendedSceneModeZoomRatioRangesKey = false;
     if ((0 == retcode) && (entry.count > 0)) {
-        hasBokehMaxSizesKey = std::find(entry.data.i32, entry.data.i32+entry.count,
-                ANDROID_CONTROL_AVAILABLE_BOKEH_MAX_SIZES) != entry.data.i32+entry.count;
-        hasBokehZoomRatioRangesKey = std::find(entry.data.i32, entry.data.i32+entry.count,
-                ANDROID_CONTROL_AVAILABLE_BOKEH_ZOOM_RATIO_RANGES) != entry.data.i32+entry.count;
+        hasExtendedSceneModeMaxSizesKey =
+                std::find(entry.data.i32, entry.data.i32 + entry.count,
+                          ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES) !=
+                entry.data.i32 + entry.count;
+        hasExtendedSceneModeZoomRatioRangesKey =
+                std::find(entry.data.i32, entry.data.i32 + entry.count,
+                          ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_ZOOM_RATIO_RANGES) !=
+                entry.data.i32 + entry.count;
     } else {
         ADD_FAILURE() << "Get camera availableCharacteristicsKeys failed!";
     }
 
     camera_metadata_ro_entry maxSizesEntry;
-    retcode = find_camera_metadata_ro_entry(metadata,
-            ANDROID_CONTROL_AVAILABLE_BOKEH_MAX_SIZES, &maxSizesEntry);
-    bool hasBokehMaxSizes = (0 == retcode && maxSizesEntry.count > 0);
+    retcode = find_camera_metadata_ro_entry(
+            metadata, ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_MAX_SIZES, &maxSizesEntry);
+    bool hasExtendedSceneModeMaxSizes = (0 == retcode && maxSizesEntry.count > 0);
 
     camera_metadata_ro_entry zoomRatioRangesEntry;
-    retcode = find_camera_metadata_ro_entry(metadata,
-            ANDROID_CONTROL_AVAILABLE_BOKEH_ZOOM_RATIO_RANGES, &zoomRatioRangesEntry);
-    bool hasBokehZoomRatioRanges = (0 == retcode && zoomRatioRangesEntry.count > 0);
+    retcode = find_camera_metadata_ro_entry(
+            metadata, ANDROID_CONTROL_AVAILABLE_EXTENDED_SCENE_MODE_ZOOM_RATIO_RANGES,
+            &zoomRatioRangesEntry);
+    bool hasExtendedSceneModeZoomRatioRanges = (0 == retcode && zoomRatioRangesEntry.count > 0);
 
-    // Bokeh keys must all be available, or all be unavailable.
-    bool noBokeh = !hasBokehRequestKey && !hasBokehResultKey && !hasBokehMaxSizesKey &&
-            !hasBokehZoomRatioRangesKey && !hasBokehMaxSizes && !hasBokehZoomRatioRanges;
-    if (noBokeh) {
+    // Extended scene mode keys must all be available, or all be unavailable.
+    bool noExtendedSceneMode =
+            !hasExtendedSceneModeRequestKey && !hasExtendedSceneModeResultKey &&
+            !hasExtendedSceneModeMaxSizesKey && !hasExtendedSceneModeZoomRatioRangesKey &&
+            !hasExtendedSceneModeMaxSizes && !hasExtendedSceneModeZoomRatioRanges;
+    if (noExtendedSceneMode) {
         return;
     }
-    bool hasBokeh = hasBokehRequestKey && hasBokehResultKey && hasBokehMaxSizesKey &&
-            hasBokehZoomRatioRangesKey && hasBokehMaxSizes && hasBokehZoomRatioRanges;
-    ASSERT_TRUE(hasBokeh);
+    bool hasExtendedSceneMode = hasExtendedSceneModeRequestKey && hasExtendedSceneModeResultKey &&
+                                hasExtendedSceneModeMaxSizesKey &&
+                                hasExtendedSceneModeZoomRatioRangesKey &&
+                                hasExtendedSceneModeMaxSizes && hasExtendedSceneModeZoomRatioRanges;
+    ASSERT_TRUE(hasExtendedSceneMode);
 
-    // Must have OFF, and must have one of STILL_CAPTURE and CONTINUOUS.
-    // Only valid combinations: {OFF, CONTINUOUS}, {OFF, STILL_CAPTURE}, and
-    // {OFF, CONTINUOUS, STILL_CAPTURE}.
+    // Must have DISABLED, and must have one of BOKEH_STILL_CAPTURE, BOKEH_CONTINUOUS, or a VENDOR
+    // mode.
     ASSERT_TRUE((maxSizesEntry.count == 6 && zoomRatioRangesEntry.count == 2) ||
             (maxSizesEntry.count == 9 && zoomRatioRangesEntry.count == 4));
-    bool hasOffMode = false;
-    bool hasStillCaptureMode = false;
-    bool hasContinuousMode = false;
+    bool hasDisabledMode = false;
+    bool hasBokehStillCaptureMode = false;
+    bool hasBokehContinuousMode = false;
+    bool hasVendorMode = false;
     std::vector<AvailableStream> outputStreams;
     ASSERT_EQ(Status::OK, getAvailableOutputStreams(metadata, outputStreams));
     for (int i = 0, j = 0; i < maxSizesEntry.count && j < zoomRatioRangesEntry.count; i += 3) {
@@ -6666,24 +6687,29 @@
         int32_t maxWidth = maxSizesEntry.data.i32[i+1];
         int32_t maxHeight = maxSizesEntry.data.i32[i+2];
         switch (mode) {
-            case ANDROID_CONTROL_BOKEH_MODE_OFF:
-                hasOffMode = true;
+            case ANDROID_CONTROL_EXTENDED_SCENE_MODE_DISABLED:
+                hasDisabledMode = true;
                 ASSERT_TRUE(maxWidth == 0 && maxHeight == 0);
                 break;
-            case ANDROID_CONTROL_BOKEH_MODE_STILL_CAPTURE:
-                hasStillCaptureMode = true;
+            case ANDROID_CONTROL_EXTENDED_SCENE_MODE_BOKEH_STILL_CAPTURE:
+                hasBokehStillCaptureMode = true;
                 j += 2;
                 break;
-            case ANDROID_CONTROL_BOKEH_MODE_CONTINUOUS:
-                hasContinuousMode = true;
+            case ANDROID_CONTROL_EXTENDED_SCENE_MODE_BOKEH_CONTINUOUS:
+                hasBokehContinuousMode = true;
                 j += 2;
                 break;
             default:
-                ADD_FAILURE() << "Invalid bokehMode advertised: " << mode;
+                if (mode < ANDROID_CONTROL_EXTENDED_SCENE_MODE_VENDOR_START) {
+                    ADD_FAILURE() << "Invalid extended scene mode advertised: " << mode;
+                } else {
+                    hasVendorMode = true;
+                    j += 2;
+                }
                 break;
         }
 
-        if (mode != ANDROID_CONTROL_BOKEH_MODE_OFF) {
+        if (mode != ANDROID_CONTROL_EXTENDED_SCENE_MODE_DISABLED) {
             // Make sure size is supported.
             bool sizeSupported = false;
             for (const auto& stream : outputStreams) {
@@ -6703,8 +6729,8 @@
             ASSERT_LE(minZoomRatio, maxZoomRatio);
         }
     }
-    ASSERT_TRUE(hasOffMode);
-    ASSERT_TRUE(hasStillCaptureMode || hasContinuousMode);
+    ASSERT_TRUE(hasDisabledMode);
+    ASSERT_TRUE(hasBokehStillCaptureMode || hasBokehContinuousMode || hasVendorMode);
 }
 
 void CameraHidlTest::verifyZoomCharacteristics(const camera_metadata_t* metadata) {
diff --git a/camera/provider/2.6/ICameraProvider.hal b/camera/provider/2.6/ICameraProvider.hal
index 5651550..ed1d31d 100644
--- a/camera/provider/2.6/ICameraProvider.hal
+++ b/camera/provider/2.6/ICameraProvider.hal
@@ -76,6 +76,13 @@
      * configuration settings exposed through camera metadata), should the sum
      * of resource costs for the combination be <= 100.
      *
+     * The lists of camera id combinations returned by this method may contain
+     * hidden physical camera ids. If a combination does contain hidden physical
+     * camera ids, the camera framework must be able to open any logical cameras
+     * that contain these hidden physical camera ids in their
+     * ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS list, in addition to the other
+     * camera ids advertised in the combination, for concurrent operation.
+     *
      * @return status Status code for the operation
      * @return cameraIds a list of camera id combinations that support
      *         concurrent stream configurations with the minimum guarantees
diff --git a/compatibility_matrices/Android.bp b/compatibility_matrices/Android.bp
index 7883dd9..33157a6 100644
--- a/compatibility_matrices/Android.bp
+++ b/compatibility_matrices/Android.bp
@@ -83,8 +83,8 @@
         "compatibility_matrix.current.xml",
     ],
     kernel_configs: [
-        "kernel_config_current_4.14",
-        "kernel_config_current_4.19",
-        "kernel_config_current_5.4",
+        "kernel_config_r_4.14",
+        "kernel_config_r_4.19",
+        "kernel_config_r_5.4",
     ]
 }
diff --git a/compatibility_matrices/compatibility_matrix.4.xml b/compatibility_matrices/compatibility_matrix.4.xml
index 01ec172..e5e012c 100644
--- a/compatibility_matrices/compatibility_matrix.4.xml
+++ b/compatibility_matrices/compatibility_matrix.4.xml
@@ -181,12 +181,6 @@
     </hal>
     <hal format="hidl" optional="true">
         <name>android.hardware.gnss</name>
-        <!--
-         - Both versions are listed here as a workaround for libvintf since 2.0 extends 1.1.
-         - Devices launched with Q must support gnss@2.0, see VtsTrebleVendorVintfTest
-         - test DeviceManifestTest#GnssHalVersionCompatibility.
-        -->
-        <version>1.1</version>
         <version>2.0</version>
         <interface>
             <name>IGnss</name>
@@ -429,7 +423,6 @@
     </hal>
     <hal format="hidl" optional="true">
         <name>android.hardware.thermal</name>
-        <version>1.0-1</version>
         <version>2.0</version>
         <interface>
             <name>IThermal</name>
diff --git a/compatibility_matrices/compatibility_matrix.current.xml b/compatibility_matrices/compatibility_matrix.current.xml
index 51c6546..f28a8b2 100644
--- a/compatibility_matrices/compatibility_matrix.current.xml
+++ b/compatibility_matrices/compatibility_matrix.current.xml
@@ -53,7 +53,7 @@
     </hal>
     <hal format="hidl" optional="true">
         <name>android.hardware.automotive.evs</name>
-        <version>1.0</version>
+        <version>1.0-1</version>
         <interface>
             <name>IEvsEnumerator</name>
             <instance>default</instance>
@@ -209,12 +209,6 @@
     </hal>
     <hal format="hidl" optional="true">
         <name>android.hardware.gnss</name>
-        <!--
-         - Both versions are listed here as a workaround for libvintf since 2.0 extends 1.1.
-         - Devices launched with Q must support gnss@2.0, see VtsTrebleVendorVintfTest
-         - test DeviceManifestTest#GnssHalVersionCompatibility.
-        -->
-        <version>1.1</version>
         <version>2.0-1</version>
         <interface>
             <name>IGnss</name>
@@ -470,7 +464,6 @@
     </hal>
     <hal format="hidl" optional="true">
         <name>android.hardware.thermal</name>
-        <version>1.0</version>
         <version>2.0</version>
         <interface>
             <name>IThermal</name>
@@ -494,6 +487,14 @@
         </interface>
     </hal>
     <hal format="hidl" optional="true">
+        <name>android.hardware.tv.tuner</name>
+        <version>1.0</version>
+        <interface>
+            <name>ITuner</name>
+            <instance>default</instance>
+        </interface>
+    </hal>
+    <hal format="hidl" optional="true">
         <name>android.hardware.usb</name>
         <version>1.0-2</version>
         <interface>
diff --git a/current.txt b/current.txt
index c9cba70..47b7206 100644
--- a/current.txt
+++ b/current.txt
@@ -606,7 +606,6 @@
 5751f230e86a36111e7c5b995577cbf89d8df76c8e6c7641199198f3db3a93f7 android.hardware.wifi@1.3::IWifiStaIface
 
 # HALs released in Android R
-822369cf4dc16a6f6b9622bcf86cbdc0b692dc82193fc15e967767175cbfdd8f android.hardware.audio@6.0::types
 7241bd4596a927cd46d4b82f5e29e2cbe57f194aa1b25555f1d1d352e8b15c61 android.hardware.audio@6.0::IDevice
 2402876cbc23c0de3690a665eca84fd3857d1808dba5cad25ce272f81ecef8c9 android.hardware.audio@6.0::IDevicesFactory
 bca5379d5065e2e08b6ad7308ffc8a71a972fc0698bec678ea32eea786d01cb5 android.hardware.audio@6.0::IPrimaryDevice
@@ -615,8 +614,8 @@
 164826a380f4c1700183003f62d7532e367b67381c30ea44f946c0cf00008f85 android.hardware.audio@6.0::IStreamOut
 997fdaad7a9d17ee7e01feb7031a753e2365e72ad30b11d950e9183fabdf3844 android.hardware.audio@6.0::IStreamOutCallback
 e7ca0db9a1098210f327a9b152fa6afe6bf019c41e5264c64829d04d50c0a526 android.hardware.audio@6.0::IStreamOutEventCallback
+822369cf4dc16a6f6b9622bcf86cbdc0b692dc82193fc15e967767175cbfdd8f android.hardware.audio@6.0::types
 bee662c62d997d8065e2bcb5c1e7a9578931f22ce28fd02c219fdb4d0630abf7 android.hardware.audio.common@6.0::types
-817930d58412d662cb45e641c50cb62c727e4a3e3ffe7029a53cad9677b97d58 android.hardware.audio.effect@6.0::types
 525bec6b44f1103869c269a128d51b8dccd73af5340ba863c8886c68357c7faf android.hardware.audio.effect@6.0::IAcousticEchoCancelerEffect
 8d76bbe3719d051a8e9a1dcf9244f37f5b0a491feb249fa48391edf7cb4f3131 android.hardware.audio.effect@6.0::IAutomaticGainControlEffect
 461b1114cb35d89f87e5694e0792ba53c112a7fa9a14d9b95188cf9c4764be23 android.hardware.audio.effect@6.0::IBassBoostEffect
@@ -631,18 +630,19 @@
 5237c42d3913ef569f07bec802568084b615155d05a7951e75085da54856508c android.hardware.audio.effect@6.0::IPresetReverbEffect
 282193799d60bff27a84c65a36218c1e7d8f582f5828e2e059383d1b90aa56bd android.hardware.audio.effect@6.0::IVirtualizerEffect
 0868e00f7c5ee16723bda1a8f57099763d04100ae7126a1c2d3a9a87c844a7e8 android.hardware.audio.effect@6.0::IVisualizerEffect
+817930d58412d662cb45e641c50cb62c727e4a3e3ffe7029a53cad9677b97d58 android.hardware.audio.effect@6.0::types
 7e8e1c3d0173c5d503dd01cecff8e3864478557ca6b9e8cc2291598b1a4aea62 android.hardware.biometrics.face@1.1::IBiometricsFace
-ae6315fd42196478ac08441cb489d854118001bca5b9b9fd58af5110952be30e android.hardware.biometrics.fingerprint@2.2::types
 6828bbf18dc5d0f00c73341a10c8e4d574346c1abb1c2ed682ba5e9f8a3240d9 android.hardware.biometrics.fingerprint@2.2::IBiometricsFingerprint
 82cad99f5feb2ea9bcd4579055edf4af8feb9fc602a6e4827ddd727d254d4991 android.hardware.biometrics.fingerprint@2.2::IBiometricsFingerprintClientCallback
+ae6315fd42196478ac08441cb489d854118001bca5b9b9fd58af5110952be30e android.hardware.biometrics.fingerprint@2.2::types
 362fd1c21641c2224f3b80c30d9797b988fa3f344243d531ba73c553779a5763 android.hardware.bluetooth@1.1::IBluetoothHci
 40ab2c6866c18d32baf6e49e3053949e79601f56963a791e93e68b9ee18f718d android.hardware.bluetooth@1.1::IBluetoothHciCallbacks
 07d0a252b2d8fa35887908a996ba395cf392968395fc30afab791f46e0c22a52 android.hardware.boot@1.1::IBootControl
 74049a402be913963edfdd80828a53736570e9d8124a1bf18166b6ed46a6b0ab android.hardware.boot@1.1::types
 b8c63679e1a3874b356f3e691aecce1191d38f59063cf2ed2dce8a9d4cabf00e android.hardware.camera.device@3.6::ICameraDevice
-99aae72ae75a8ddf63ccffbc585f3a55f4d0847b4adff3d7a0f8bd9e2305bec1 android.hardware.camera.provider@2.6::ICameraProvider
-8f8d9463508ff9cae88eb35c429fd0e2dbca0ca8f5de7fdf836cc0c4370becb6 android.hardware.camera.provider@2.6::ICameraProviderCallback
 a35d5151b48505f06a775b38c0e2e265f80a845d92802324c643565807f81c53 android.hardware.camera.device@3.6::types
+21086e1c7a2acc0ebe0ff8561b11f3c2009be687a92d79b608a5f00b16c5f598 android.hardware.camera.provider@2.6::ICameraProvider
+8f8d9463508ff9cae88eb35c429fd0e2dbca0ca8f5de7fdf836cc0c4370becb6 android.hardware.camera.provider@2.6::ICameraProviderCallback
 c1aa508d00b66ed5feefea398fd5edf28fa651ac89773adad7dfda4e0a73a952 android.hardware.cas@1.2::ICas
 9811f867def49b420d8c707f7e38d3bdd64f835244e1d2a5e9762ab9835672dc android.hardware.cas@1.2::ICasListener
 f18695dd36ee205640b8326a17453858a7b4596653aaa6ef0016b0aef1bd4dac android.hardware.cas@1.2::IMediaCasService
@@ -651,9 +651,8 @@
 3581d0ba61663cdd45807494dcd697d01c074f27587df9140655f94346969cfe android.hardware.contexthub@1.1::types
 66931c2506fbb5af61f20138cb05e0a09e7bf67d6964c231d27c648933bb33ec android.hardware.drm@1.3::ICryptoFactory
 994d08ab27d613022c258a9ec48cece7adf2a305e92df5d76ef923e2c6665f64 android.hardware.drm@1.3::IDrmFactory
-d9df99be0f59d8f33a9699fe316c67bfd11818aa69440bb1123ba43e717cea85 android.hardware.dumpstate@1.1::types
 186bc152ae189ab48f3a761a44ddf5edd0d483073c5b6ca1f802f8b50488b754 android.hardware.dumpstate@1.1::IDumpstateDevice
-769d346927a94fd40ee80a5a976d8d15cf022ef99c5900738f4a82f26c0ed229 android.hardware.gnss@2.1::types
+d9df99be0f59d8f33a9699fe316c67bfd11818aa69440bb1123ba43e717cea85 android.hardware.dumpstate@1.1::types
 c319e68b03829958404402c2d9c682019678087d60495807c0a7444e0a6af981 android.hardware.gnss@2.1::IGnss
 ba5ac712b2a656dc07c83ab4a7a2c2f3bee1bbcb752e8b8ffa9b672f3b5b0728 android.hardware.gnss@2.1::IGnssAntennaInfo
 0bc3ed97cbc3f6abc89c68f4e9f4d124f9f723431997dc88c2186cf4d2ad47ee android.hardware.gnss@2.1::IGnssAntennaInfoCallback
@@ -661,15 +660,12 @@
 737d750017738f0753d13ba01a3310e0161f294b8ae80b3fd63eaa227e9d9c66 android.hardware.gnss@2.1::IGnssConfiguration
 7913a11206a577b12ade86a7cf3f95c2639cb514d086673f279bf99238c9917e android.hardware.gnss@2.1::IGnssMeasurement
 df52e2c39ed701a355b5e0fdbf83fe5fa7d04bfecd715116b39373d46dc3c682 android.hardware.gnss@2.1::IGnssMeasurementCallback
+769d346927a94fd40ee80a5a976d8d15cf022ef99c5900738f4a82f26c0ed229 android.hardware.gnss@2.1::types
 6670e7780803a8c696c6391fda5589a334b1b37dc7be9393792ed35035413633 android.hardware.gnss.measurement_corrections@1.1::IMeasurementCorrections
 956c1576ca0d6f11b42980ef59052062836b6763fe973af6cb709da50787f710 android.hardware.gnss.measurement_corrections@1.1::types
 ce8dbe76eb9ee94b46ef98f725be992e760a5751073d4f4912484026541371f3 android.hardware.health@2.1::IHealth
 26f04510a0b57aba5167c5c0a7c2f077c2acbb98b81902a072517829fd9fd67f android.hardware.health@2.1::IHealthInfoCallback
 e2f8bc1868fd4a3fd587c172773ea5a8c2f5a3deaf7958394102ca455252b255 android.hardware.health@2.1::types
-0589e410f519e36514e7ece18f283f022df0f70efd2c12821d822f67f74aba98 android.hardware.identity@1.0::types
-bbeee9604128ede83ee755b67e73b5ad29e6e1dbac9ec41fea6ffe2745b0c50a android.hardware.identity@1.0::IIdentityCredential
-96ce8aad80f4c476f25261f790d357c117e79e18474c7dadd850dac704bbe65e android.hardware.identity@1.0::IIdentityCredentialStore
-8da9c938e58f7d636ddd2f92c646f99d9a9e79612e6441b6380ab12744251873 android.hardware.identity@1.0::IWritableIdentityCredential
 27ae3724053940462114228872b3ffaf0b8e6177d5ba97f5a76339d12b8a99dd android.hardware.keymaster@4.1::IKeymasterDevice
 adb0efdf1462e9b2e742c0dcadd598666aac551f178be06e755bfcdf5797abd0 android.hardware.keymaster@4.1::IOperation
 ddcf89cd8ee2df0d32aee55050826446fb64f7aafde0a7cd946c64f61b1a364c android.hardware.keymaster@4.1::types
@@ -682,7 +678,15 @@
 2b0b10d2ea7a18a4048cd0eb83d35c19a817aeee95f65807fc31f4ef21381397 android.hardware.neuralnetworks@1.3::IPreparedModel
 eee3430cc86c97c7b407495863d8fb61da6f1a64b7721e77b9b4909b11b174e9 android.hardware.neuralnetworks@1.3::IPreparedModelCallback
 e442ab1b440327fe4e8a3b0b8ac6874e9bc6342e91fe976eb9fea77c63961ec8 android.hardware.neuralnetworks@1.3::types
-b335c3c732c799b299fa61c6de6260ab4d20cbd0ec21acd6db14d8156c745d0b android.hardware.tv.tuner@1.0::types
+b454df853441c12f6e425e8a60dd29fda20f5e6e39b93d1103e4b37495db38aa android.hardware.radio@1.5::IRadio
+fcbb0742a88215ee7a6d7ce0825d253eb2b50391fc6c8c48667f9fd7f6d4549e android.hardware.radio@1.5::IRadioIndication
+b809193970a91ca637a4b0184767315601d32e3ef3d5992ffbc7a8d14a14f015 android.hardware.radio@1.5::IRadioResponse
+6b8dcd5e3e33a524cc7ebb14671a76ad3a2d333467397ce82acc4024346386f8 android.hardware.radio@1.5::types
+3ca6616381080bdd6c08141ad12775a94ae868c58b02b1274ae3326f7de724ab android.hardware.sensors@2.1::ISensors
+3d4141c6373cd9ca02fe221a7d12343840de2255d032c38248fe8e35816b58b2 android.hardware.sensors@2.1::ISensorsCallback
+8051cc50fc90ed447f058a8b15d81f35a65f1bd9004b1de4f127edeb89b47978 android.hardware.sensors@2.1::types
+b37f78e3fdc79af8b32a545b2b426f1fd1355b359d9e7835f3bf1ed0aa4518d8 android.hardware.soundtrigger@2.3::ISoundTriggerHw
+4a6517ea4ad807855428b0101d8e1a486497bd88ab4300ba3b2be43d46d32580 android.hardware.soundtrigger@2.3::types
 adab52e647d1a1ccfbdabdfc9c73352f8e834b61322e505bc6e3d3a0d3acc259 android.hardware.tv.tuner@1.0::IDemux
 548e1a16fc4f779346e14968a63cd6f160e1e2c8b8c99256b2bac24a24b52a9a android.hardware.tv.tuner@1.0::IDescrambler
 b84597d59f0f1d03c9997d60eb15280f3950c287d46016240d89859789db4d47 android.hardware.tv.tuner@1.0::IDvr
@@ -695,6 +699,8 @@
 b2310785bdb55f97bbbb2176e2ee73ed8d2a7ce5895bd20c997b90c5f2868ad8 android.hardware.tv.tuner@1.0::ILnbCallback
 4788787e662293d526ff7789fc24e82533e7f6ff99a967ebc3e3ec6b17628796 android.hardware.tv.tuner@1.0::ITimeFilter
 c350c7783843e0c7cf30f90c918770b0d3c09fc0fe5e532e2f2e7210fcfe71c9 android.hardware.tv.tuner@1.0::ITuner
+b335c3c732c799b299fa61c6de6260ab4d20cbd0ec21acd6db14d8156c745d0b android.hardware.tv.tuner@1.0::types
+7746fda1fbf9c7c132bae701cc5a161309e4f5e7f3e8065811045975ee86196d android.hardware.usb.gadget@1.1::IUsbGadget
 3e01d4446cd69fd1c48f8572efd97487bc179564b32bd795800b97bbe10be37b android.hardware.wifi@1.4::IWifi
 c67aaf26a7a40d14ea61e70e20afacbd0bb906df1704d585ac8599fbb69dd44b android.hardware.wifi.hostapd@1.2::IHostapd
 2b5a7ea572b736030c64a3b4043af244425477c4672301780fe15aba5ed393d9 android.hardware.wifi.hostapd@1.2::types
@@ -703,13 +709,3 @@
 2ce1f7fb52e49f80b13a9b153d491bce530552f02357ea729acae922a8659f93 android.hardware.wifi.supplicant@1.3::ISupplicantStaIfaceCallback
 77531c8d048f8f8ae532babd0ca86332a865ec9aace1b051226ef2b21123e645 android.hardware.wifi.supplicant@1.3::ISupplicantStaNetwork
 98592d193a717066facf91428426e5abe211e3bd718bc372e29fb944ddbe6e7c android.hardware.wifi.supplicant@1.3::types
-e7669bddacbdaee2cd9a87762a13fb7648639eead54bf4d767dc06eaaeb35736 android.hardware.radio@1.5::types
-b454df853441c12f6e425e8a60dd29fda20f5e6e39b93d1103e4b37495db38aa android.hardware.radio@1.5::IRadio
-fcbb0742a88215ee7a6d7ce0825d253eb2b50391fc6c8c48667f9fd7f6d4549e android.hardware.radio@1.5::IRadioIndication
-b809193970a91ca637a4b0184767315601d32e3ef3d5992ffbc7a8d14a14f015 android.hardware.radio@1.5::IRadioResponse
-3ca6616381080bdd6c08141ad12775a94ae868c58b02b1274ae3326f7de724ab android.hardware.sensors@2.1::ISensors
-3d4141c6373cd9ca02fe221a7d12343840de2255d032c38248fe8e35816b58b2 android.hardware.sensors@2.1::ISensorsCallback
-8051cc50fc90ed447f058a8b15d81f35a65f1bd9004b1de4f127edeb89b47978 android.hardware.sensors@2.1::types
-4a6517ea4ad807855428b0101d8e1a486497bd88ab4300ba3b2be43d46d32580 android.hardware.soundtrigger@2.3::types
-b37f78e3fdc79af8b32a545b2b426f1fd1355b359d9e7835f3bf1ed0aa4518d8 android.hardware.soundtrigger@2.3::ISoundTriggerHw
-7746fda1fbf9c7c132bae701cc5a161309e4f5e7f3e8065811045975ee86196d android.hardware.usb.gadget@1.1::IUsbGadget
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index e28605d..ae1e3a2 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -125,7 +125,9 @@
 // Test driver for those generated from ml/nn/runtime/test/spec
 void Execute(const sp<IDevice>& device, const TestModel& testModel) {
     const Model model = createModel(testModel);
-    const Request request = createRequest(testModel);
+
+    ExecutionContext context;
+    const Request request = context.createRequest(testModel);
 
     // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
@@ -143,7 +145,7 @@
     ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
 
     // Retrieve execution results.
-    const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+    const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
 
     // We want "close-enough" results.
     checkResults(testModel, outputs);
@@ -158,6 +160,10 @@
     return TestModelManager::get().getTestModels(filter);
 }
 
+std::vector<NamedModel> getNamedModels(const FilterNameFn& filter) {
+    return TestModelManager::get().getTestModels(filter);
+}
+
 std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
     const auto& [namedDevice, namedModel] = info.param;
     return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
index f230a02..1a55c2f 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.h
@@ -37,6 +37,9 @@
 using FilterFn = std::function<bool(const test_helper::TestModel&)>;
 std::vector<NamedModel> getNamedModels(const FilterFn& filter);
 
+using FilterNameFn = std::function<bool(const std::string&)>;
+std::vector<NamedModel> getNamedModels(const FilterNameFn& filter);
+
 std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info);
 
 #define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                     \
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 0dba85a..3613e69 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -21,10 +21,13 @@
 
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware_buffer.h>
 #include <android/hidl/allocator/1.0/IAllocator.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
+#include <vndk/hardware_buffer.h>
 
+#include <gtest/gtest.h>
 #include <algorithm>
 #include <iostream>
 #include <vector>
@@ -37,10 +40,64 @@
 using V1_0::Request;
 using V1_0::RequestArgument;
 
-constexpr uint32_t kInputPoolIndex = 0;
-constexpr uint32_t kOutputPoolIndex = 1;
+std::unique_ptr<TestAshmem> TestAshmem::create(uint32_t size) {
+    auto ashmem = std::make_unique<TestAshmem>(size);
+    return ashmem->mIsValid ? std::move(ashmem) : nullptr;
+}
 
-Request createRequest(const TestModel& testModel) {
+void TestAshmem::initialize(uint32_t size) {
+    mIsValid = false;
+    ASSERT_GT(size, 0);
+    mHidlMemory = nn::allocateSharedMemory(size);
+    ASSERT_TRUE(mHidlMemory.valid());
+    mMappedMemory = mapMemory(mHidlMemory);
+    ASSERT_NE(mMappedMemory, nullptr);
+    mPtr = static_cast<uint8_t*>(static_cast<void*>(mMappedMemory->getPointer()));
+    ASSERT_NE(mPtr, nullptr);
+    mIsValid = true;
+}
+
+std::unique_ptr<TestBlobAHWB> TestBlobAHWB::create(uint32_t size) {
+    auto ahwb = std::make_unique<TestBlobAHWB>(size);
+    return ahwb->mIsValid ? std::move(ahwb) : nullptr;
+}
+
+void TestBlobAHWB::initialize(uint32_t size) {
+    mIsValid = false;
+    ASSERT_GT(size, 0);
+    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+    const AHardwareBuffer_Desc desc = {
+            .width = size,
+            .height = 1,
+            .layers = 1,
+            .format = AHARDWAREBUFFER_FORMAT_BLOB,
+            .usage = usage,
+            .stride = size,
+    };
+    ASSERT_EQ(AHardwareBuffer_allocate(&desc, &mAhwb), 0);
+    ASSERT_NE(mAhwb, nullptr);
+
+    void* buffer = nullptr;
+    ASSERT_EQ(AHardwareBuffer_lock(mAhwb, usage, -1, nullptr, &buffer), 0);
+    ASSERT_NE(buffer, nullptr);
+    mPtr = static_cast<uint8_t*>(buffer);
+
+    const native_handle_t* handle = AHardwareBuffer_getNativeHandle(mAhwb);
+    ASSERT_NE(handle, nullptr);
+    mHidlMemory = hidl_memory("hardware_buffer_blob", handle, desc.width);
+    mIsValid = true;
+}
+
+TestBlobAHWB::~TestBlobAHWB() {
+    if (mAhwb) {
+        AHardwareBuffer_unlock(mAhwb, nullptr);
+        AHardwareBuffer_release(mAhwb);
+    }
+}
+
+Request ExecutionContext::createRequest(const TestModel& testModel, MemoryType memoryType) {
+    CHECK(memoryType == MemoryType::ASHMEM || memoryType == MemoryType::BLOB_AHWB);
+
     // Model inputs.
     hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size());
     size_t inputSize = 0;
@@ -80,16 +137,19 @@
     }
 
     // Allocate memory pools.
-    hidl_vec<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
-                                   nn::allocateSharedMemory(outputSize)};
-    CHECK_NE(pools[kInputPoolIndex].size(), 0u);
-    CHECK_NE(pools[kOutputPoolIndex].size(), 0u);
-    sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex]);
-    CHECK(inputMemory.get() != nullptr);
-    uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
-    CHECK(inputPtr != nullptr);
+    if (memoryType == MemoryType::ASHMEM) {
+        mInputMemory = TestAshmem::create(inputSize);
+        mOutputMemory = TestAshmem::create(outputSize);
+    } else {
+        mInputMemory = TestBlobAHWB::create(inputSize);
+        mOutputMemory = TestBlobAHWB::create(outputSize);
+    }
+    EXPECT_NE(mInputMemory, nullptr);
+    EXPECT_NE(mOutputMemory, nullptr);
+    hidl_vec<hidl_memory> pools = {mInputMemory->getHidlMemory(), mOutputMemory->getHidlMemory()};
 
     // Copy input data to the memory pool.
+    uint8_t* inputPtr = mInputMemory->getPointer();
     for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
         const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
         if (op.data.size() > 0) {
@@ -102,18 +162,13 @@
     return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
 }
 
-std::vector<TestBuffer> getOutputBuffers(const Request& request) {
-    sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex]);
-    CHECK(outputMemory.get() != nullptr);
-    uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
-    CHECK(outputPtr != nullptr);
-
+std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& request) const {
     // Copy out output results.
+    uint8_t* outputPtr = mOutputMemory->getPointer();
     std::vector<TestBuffer> outputBuffers;
     for (const auto& output : request.outputs) {
         outputBuffers.emplace_back(output.location.length, outputPtr + output.location.offset);
     }
-
     return outputBuffers;
 }
 
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index cb22250..2c17796 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -129,11 +129,17 @@
 
 TEST_P(ValidationTest, Test) {
     const Model model = createModel(kTestModel);
-    const Request request = createRequest(kTestModel);
+    ExecutionContext context;
+    const Request request = context.createRequest(kTestModel);
     ASSERT_FALSE(kTestModel.expectFailure);
     validateEverything(kDevice, model, request);
 }
 
-INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
+INSTANTIATE_GENERATED_TEST(ValidationTest, [](const std::string& testName) {
+    // Skip validation for the "inputs_as_internal" and "all_tensors_as_inputs"
+    // generated tests.
+    return testName.find("inputs_as_internal") == std::string::npos &&
+           testName.find("all_tensors_as_inputs") == std::string::npos;
+});
 
 }  // namespace android::hardware::neuralnetworks::V1_0::vts::functional
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
index 6d4534c..3292f79 100644
--- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -19,6 +19,8 @@
 
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware_buffer.h>
+#include <android/hidl/memory/1.0/IMemory.h>
 #include <algorithm>
 #include <iosfwd>
 #include <string>
@@ -28,11 +30,73 @@
 
 namespace android::hardware::neuralnetworks {
 
-// Create HIDL Request from the TestModel struct.
-V1_0::Request createRequest(const test_helper::TestModel& testModel);
+// Convenience class to manage the lifetime of memory resources.
+class TestMemoryBase {
+    DISALLOW_COPY_AND_ASSIGN(TestMemoryBase);
 
-// After execution, copy out output results from the output memory pool.
-std::vector<::test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request);
+  public:
+    TestMemoryBase() = default;
+    virtual ~TestMemoryBase() = default;
+    uint8_t* getPointer() const { return mPtr; }
+    hidl_memory getHidlMemory() const { return mHidlMemory; }
+
+  protected:
+    uint8_t* mPtr = nullptr;
+    hidl_memory mHidlMemory;
+    bool mIsValid = false;
+};
+
+class TestAshmem : public TestMemoryBase {
+  public:
+    static std::unique_ptr<TestAshmem> create(uint32_t size);
+
+    // Prefer TestAshmem::create.
+    // The constructor calls initialize, which constructs the memory resources. This is a
+    // workaround for the fact that gtest macros cannot be used directly in a constructor.
+    TestAshmem(uint32_t size) { initialize(size); }
+
+  private:
+    void initialize(uint32_t size);
+    sp<hidl::memory::V1_0::IMemory> mMappedMemory;
+};
+
+class TestBlobAHWB : public TestMemoryBase {
+  public:
+    static std::unique_ptr<TestBlobAHWB> create(uint32_t size);
+
+    // Prefer TestBlobAHWB::create.
+    // The constructor calls initialize, which constructs the memory resources. This is a
+    // workaround for the fact that gtest macros cannot be used directly in a constructor.
+    TestBlobAHWB(uint32_t size) { initialize(size); }
+    ~TestBlobAHWB();
+
+  private:
+    void initialize(uint32_t size);
+    AHardwareBuffer* mAhwb = nullptr;
+};
+
+enum class MemoryType { ASHMEM, BLOB_AHWB, DEVICE };
+
+// Manages the lifetime of memory resources used in an execution.
+class ExecutionContext {
+    DISALLOW_COPY_AND_ASSIGN(ExecutionContext);
+
+  public:
+    static constexpr uint32_t kInputPoolIndex = 0;
+    static constexpr uint32_t kOutputPoolIndex = 1;
+
+    ExecutionContext() = default;
+
+    // Create HIDL Request from the TestModel struct.
+    V1_0::Request createRequest(const test_helper::TestModel& testModel,
+                                MemoryType memoryType = MemoryType::ASHMEM);
+
+    // After execution, copy out output results from the output memory pool.
+    std::vector<test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request) const;
+
+  private:
+    std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
+};
 
 // Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
 // so this is efficiently accomplished by moving the element to the end and
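A design note on the header above: ExecutionContext owns the input and output memories, so the pools referenced by the Request it creates stay mapped for the lifetime of the context, whereas the old free functions re-mapped the output pool on every getOutputBuffers() call. A hedged usage sketch (device and model plumbing elided; the "1.0/Utils.h" include path is assumed from this header's location):

#include "1.0/Utils.h"

// The context must outlive the Request it creates, since the Request's memory pools point
// into the context-owned input/output memories.
void runOnce(const test_helper::TestModel& testModel) {
    using namespace android::hardware::neuralnetworks;
    ExecutionContext context;
    V1_0::Request request = context.createRequest(testModel, MemoryType::BLOB_AHWB);
    // ... execute 'request' on an IPreparedModel here ...
    const auto outputs = context.getOutputBuffers(request);  // reads back the output pool
    (void)outputs;
}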
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
index cee15a3..a233835 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.cpp
@@ -133,7 +133,9 @@
 // Test driver for those generated from ml/nn/runtime/test/spec
 void Execute(const sp<IDevice>& device, const TestModel& testModel) {
     const Model model = createModel(testModel);
-    const Request request = createRequest(testModel);
+
+    ExecutionContext context;
+    const Request request = context.createRequest(testModel);
 
     // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
@@ -151,7 +153,7 @@
     ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
 
     // Retrieve execution results.
-    const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+    const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
 
     // We want "close-enough" results.
     checkResults(testModel, outputs);
@@ -166,6 +168,10 @@
     return TestModelManager::get().getTestModels(filter);
 }
 
+std::vector<NamedModel> getNamedModels(const FilterNameFn& filter) {
+    return TestModelManager::get().getTestModels(filter);
+}
+
 std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
     const auto& [namedDevice, namedModel] = info.param;
     return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
diff --git a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
index cf449ea..4b1a96e 100644
--- a/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.1/vts/functional/GeneratedTestHarness.h
@@ -37,6 +37,9 @@
 using FilterFn = std::function<bool(const test_helper::TestModel&)>;
 std::vector<NamedModel> getNamedModels(const FilterFn& filter);
 
+using FilterNameFn = std::function<bool(const std::string&)>;
+std::vector<NamedModel> getNamedModels(const FilterNameFn& filter);
+
 std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info);
 
 #define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                     \
diff --git a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
index d56d40b..54e8802 100644
--- a/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.1/vts/functional/VtsHalNeuralnetworks.cpp
@@ -132,11 +132,17 @@
 
 TEST_P(ValidationTest, Test) {
     const Model model = createModel(kTestModel);
-    const Request request = createRequest(kTestModel);
+    ExecutionContext context;
+    const Request request = context.createRequest(kTestModel);
     ASSERT_FALSE(kTestModel.expectFailure);
     validateEverything(kDevice, model, request);
 }
 
-INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
+INSTANTIATE_GENERATED_TEST(ValidationTest, [](const std::string& testName) {
+    // Skip validation for the "inputs_as_internal" and "all_tensors_as_inputs"
+    // generated tests.
+    return testName.find("inputs_as_internal") == std::string::npos &&
+           testName.find("all_tensors_as_inputs") == std::string::npos;
+});
 
 }  // namespace android::hardware::neuralnetworks::V1_1::vts::functional
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
index 3ab0135..35275b4 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.cpp
@@ -68,6 +68,7 @@
     Executor executor;
     MeasureTiming measureTiming;
     OutputType outputType;
+    MemoryType memoryType;
 };
 
 }  // namespace
@@ -216,7 +217,8 @@
         return;
     }
 
-    Request request = createRequest(testModel);
+    ExecutionContext context;
+    Request request = context.createRequest(testModel, testConfig.memoryType);
     if (testConfig.outputType == OutputType::INSUFFICIENT) {
         makeOutputInsufficientSize(/*outputIndex=*/0, &request);
     }
@@ -326,7 +328,7 @@
     }
 
     // Retrieve execution results.
-    const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+    const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
 
     // We want "close-enough" results.
     checkResults(testModel, outputs);
@@ -337,24 +339,30 @@
     std::vector<OutputType> outputTypesList;
     std::vector<MeasureTiming> measureTimingList;
     std::vector<Executor> executorList;
+    std::vector<MemoryType> memoryTypeList;
 
     if (testDynamicOutputShape) {
         outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
         measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
         executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+        memoryTypeList = {MemoryType::ASHMEM};
     } else {
         outputTypesList = {OutputType::FULLY_SPECIFIED};
         measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
         executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+        memoryTypeList = {MemoryType::ASHMEM, MemoryType::BLOB_AHWB};
     }
 
     for (const OutputType outputType : outputTypesList) {
         for (const MeasureTiming measureTiming : measureTimingList) {
             for (const Executor executor : executorList) {
-                const TestConfig testConfig = {.executor = executor,
-                                               .measureTiming = measureTiming,
-                                               .outputType = outputType};
-                EvaluatePreparedModel(preparedModel, testModel, testConfig);
+                for (const MemoryType memoryType : memoryTypeList) {
+                    const TestConfig testConfig = {.executor = executor,
+                                                   .measureTiming = measureTiming,
+                                                   .outputType = outputType,
+                                                   .memoryType = memoryType};
+                    EvaluatePreparedModel(preparedModel, testModel, testConfig);
+                }
             }
         }
     }
@@ -382,6 +390,10 @@
     return TestModelManager::get().getTestModels(filter);
 }
 
+std::vector<NamedModel> getNamedModels(const FilterNameFn& filter) {
+    return TestModelManager::get().getTestModels(filter);
+}
+
 std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
     const auto& [namedDevice, namedModel] = info.param;
     return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
diff --git a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
index dfc980c..98295ff 100644
--- a/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.2/vts/functional/GeneratedTestHarness.h
@@ -41,6 +41,9 @@
 using FilterFn = std::function<bool(const test_helper::TestModel&)>;
 std::vector<NamedModel> getNamedModels(const FilterFn& filter);
 
+using FilterNameFn = std::function<bool(const std::string&)>;
+std::vector<NamedModel> getNamedModels(const FilterNameFn& filter);
+
 std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info);
 
 #define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                     \
diff --git a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
index 4fbd0e2..a60ec4d 100644
--- a/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.2/vts/functional/VtsHalNeuralnetworks.cpp
@@ -153,7 +153,8 @@
 
 TEST_P(ValidationTest, Test) {
     const Model model = createModel(kTestModel);
-    const Request request = createRequest(kTestModel);
+    ExecutionContext context;
+    const Request request = context.createRequest(kTestModel);
     if (kTestModel.expectFailure) {
         validateFailure(kDevice, model, request);
     } else {
@@ -161,7 +162,12 @@
     }
 }
 
-INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
+INSTANTIATE_GENERATED_TEST(ValidationTest, [](const std::string& testName) {
+    // Skip validation for the "inputs_as_internal" and "all_tensors_as_inputs"
+    // generated tests.
+    return testName.find("inputs_as_internal") == std::string::npos &&
+           testName.find("all_tensors_as_inputs") == std::string::npos;
+});
 
 sp<IPreparedModel> getPreparedModel_1_2(const sp<implementation::PreparedModelCallback>& callback) {
     sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
diff --git a/neuralnetworks/1.3/vts/functional/Android.bp b/neuralnetworks/1.3/vts/functional/Android.bp
index f936267..545a5be 100644
--- a/neuralnetworks/1.3/vts/functional/Android.bp
+++ b/neuralnetworks/1.3/vts/functional/Android.bp
@@ -40,6 +40,7 @@
         "BasicTests.cpp",
         "CompilationCachingTests.cpp",
         "GeneratedTestHarness.cpp",
+        "MemoryDomainTests.cpp",
         "QualityOfServiceTests.cpp",
         "TestAssertions.cpp",
         "ValidateBurst.cpp",
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
index 5689a39..4dbac16 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.cpp
@@ -72,21 +72,10 @@
 
 namespace {
 
-enum class Executor { ASYNC, SYNC, BURST, FENCED };
-
 enum class OutputType { FULLY_SPECIFIED, UNSPECIFIED, INSUFFICIENT, MISSED_DEADLINE };
 
-enum class MemoryType { SHARED, DEVICE };
-
 enum class IOType { INPUT, OUTPUT };
 
-static void waitForSyncFence(int syncFd) {
-    constexpr int kInfiniteTimeout = -1;
-    ASSERT_GT(syncFd, 0);
-    int r = sync_wait(syncFd, kInfiniteTimeout);
-    ASSERT_GE(r, 0);
-}
-
 struct TestConfig {
     Executor executor;
     MeasureTiming measureTiming;
@@ -277,6 +266,13 @@
 
 }  // namespace
 
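+// Blocks until the sync fence signals; asserts that the fence fd is valid and that the wait
+// succeeds.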
+void waitForSyncFence(int syncFd) {
+    constexpr int kInfiniteTimeout = -1;
+    ASSERT_GT(syncFd, 0);
+    int r = sync_wait(syncFd, kInfiniteTimeout);
+    ASSERT_GE(r, 0);
+}
+
 Model createModel(const TestModel& testModel) {
     uint32_t constCopySize = 0;
     uint32_t constRefSize = 0;
@@ -338,21 +334,39 @@
     }
 }
 
-constexpr uint32_t kInputPoolIndex = 0;
-constexpr uint32_t kOutputPoolIndex = 1;
-constexpr uint32_t kDeviceMemoryBeginIndex = 2;
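+// Manages memory for a single test execution: owns the shared (ashmem/AHWB) input and output
+// pools and any device memories, builds the Request, and copies out the results.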
+class ExecutionContextV1_3 {
+  public:
+    ExecutionContextV1_3(sp<IDevice> device, sp<IPreparedModel> preparedModel)
+        : kDevice(std::move(device)), kPreparedModel(std::move(preparedModel)) {}
 
-static std::pair<Request, std::vector<sp<IBuffer>>> createRequest(
-        const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
-        const TestModel& testModel, bool preferDeviceMemory) {
+    std::optional<Request> createRequest(const TestModel& testModel, MemoryType memoryType);
+    std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel,
+                                             const Request& request) const;
+
+  private:
+    // Get a TestBuffer with data copied from an IBuffer object.
+    void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBuffer) const;
+
+    static constexpr uint32_t kInputPoolIndex = 0;
+    static constexpr uint32_t kOutputPoolIndex = 1;
+    static constexpr uint32_t kDeviceMemoryBeginIndex = 2;
+
+    const sp<IDevice> kDevice;
+    const sp<IPreparedModel> kPreparedModel;
+    std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
+    std::vector<sp<IBuffer>> mBuffers;
+};
+
+std::optional<Request> ExecutionContextV1_3::createRequest(const TestModel& testModel,
+                                                           MemoryType memoryType) {
     // Memory pools are organized as:
     // - 0: Input shared memory pool
     // - 1: Output shared memory pool
     // - [2, 2+i): Input device memories
     // - [2+i, 2+i+o): Output device memories
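+    // For example (illustrative only), a request with two device-memory inputs and one
+    // device-memory output uses five pools: the two shared pools at indices 0 and 1, the input
+    // buffers at indices 2 and 3, and the output buffer at index 4.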
-    DeviceMemoryAllocator allocator(device, preparedModel, testModel);
-    std::vector<sp<IBuffer>> buffers;
+    DeviceMemoryAllocator allocator(kDevice, kPreparedModel, testModel);
     std::vector<uint32_t> tokens;
+    mBuffers.clear();
 
     // Model inputs.
     hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size());
@@ -363,13 +377,13 @@
             // Omitted input.
             inputs[i] = {.hasNoValue = true};
             continue;
-        } else if (preferDeviceMemory) {
+        } else if (memoryType == MemoryType::DEVICE) {
             SCOPED_TRACE("Input index = " + std::to_string(i));
             auto [buffer, token] = allocator.allocate<IOType::INPUT>(i);
             if (buffer != nullptr) {
-                DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
+                DataLocation loc = {.poolIndex = static_cast<uint32_t>(mBuffers.size() +
                                                                        kDeviceMemoryBeginIndex)};
-                buffers.push_back(std::move(buffer));
+                mBuffers.push_back(std::move(buffer));
                 tokens.push_back(token);
                 inputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
                 continue;
@@ -389,13 +403,13 @@
     size_t outputSize = 0;
     for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
         const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
-        if (preferDeviceMemory) {
+        if (memoryType == MemoryType::DEVICE) {
             SCOPED_TRACE("Output index = " + std::to_string(i));
             auto [buffer, token] = allocator.allocate<IOType::OUTPUT>(i);
             if (buffer != nullptr) {
-                DataLocation loc = {.poolIndex = static_cast<uint32_t>(buffers.size() +
+                DataLocation loc = {.poolIndex = static_cast<uint32_t>(mBuffers.size() +
                                                                        kDeviceMemoryBeginIndex)};
-                buffers.push_back(std::move(buffer));
+                mBuffers.push_back(std::move(buffer));
                 tokens.push_back(token);
                 outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
                 continue;
@@ -418,21 +432,29 @@
         outputs[i] = {.hasNoValue = false, .location = loc, .dimensions = {}};
     }
 
+    if (memoryType == MemoryType::DEVICE && mBuffers.empty()) {
+        return std::nullopt;
+    }
+
     // Memory pools.
-    hidl_vec<Request::MemoryPool> pools(kDeviceMemoryBeginIndex + buffers.size());
-    pools[kInputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(inputSize, 1)));
-    pools[kOutputPoolIndex].hidlMemory(nn::allocateSharedMemory(std::max<size_t>(outputSize, 1)));
-    CHECK_NE(pools[kInputPoolIndex].hidlMemory().size(), 0u);
-    CHECK_NE(pools[kOutputPoolIndex].hidlMemory().size(), 0u);
-    for (uint32_t i = 0; i < buffers.size(); i++) {
+    hidl_vec<Request::MemoryPool> pools(kDeviceMemoryBeginIndex + mBuffers.size());
+    if (memoryType == MemoryType::BLOB_AHWB) {
+        mInputMemory = TestBlobAHWB::create(std::max<size_t>(inputSize, 1));
+        mOutputMemory = TestBlobAHWB::create(std::max<size_t>(outputSize, 1));
+    } else {
+        mInputMemory = TestAshmem::create(std::max<size_t>(inputSize, 1));
+        mOutputMemory = TestAshmem::create(std::max<size_t>(outputSize, 1));
+    }
+    EXPECT_NE(mInputMemory, nullptr);
+    EXPECT_NE(mOutputMemory, nullptr);
+    pools[kInputPoolIndex].hidlMemory(mInputMemory->getHidlMemory());
+    pools[kOutputPoolIndex].hidlMemory(mOutputMemory->getHidlMemory());
+    for (uint32_t i = 0; i < mBuffers.size(); i++) {
         pools[kDeviceMemoryBeginIndex + i].token(tokens[i]);
     }
 
     // Copy input data to the input shared memory pool.
-    sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex].hidlMemory());
-    CHECK(inputMemory.get() != nullptr);
-    uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
-    CHECK(inputPtr != nullptr);
+    uint8_t* inputPtr = mInputMemory->getPointer();
     for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
         if (!inputs[i].hasNoValue && inputs[i].location.poolIndex == kInputPoolIndex) {
             const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
@@ -441,14 +463,38 @@
             std::copy(begin, end, inputPtr + inputs[i].location.offset);
         }
     }
-
-    Request request = {
+    return Request{
             .inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
-    return {std::move(request), std::move(buffers)};
+}
+
+std::vector<TestBuffer> ExecutionContextV1_3::getOutputBuffers(const TestModel& testModel,
+                                                               const Request& request) const {
+    // Copy out output results.
+    uint8_t* outputPtr = mOutputMemory->getPointer();
+    std::vector<TestBuffer> outputBuffers;
+    for (uint32_t i = 0; i < request.outputs.size(); i++) {
+        const auto& outputLoc = request.outputs[i].location;
+        if (outputLoc.poolIndex == kOutputPoolIndex) {
+            outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
+        } else {
+            const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
+            if (op.data.size() == 0) {
+                outputBuffers.emplace_back(0, nullptr);
+            } else {
+                SCOPED_TRACE("Output index = " + std::to_string(i));
+                const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
+                TestBuffer buffer;
+                getBuffer(mBuffers[bufferIndex], op.data.size(), &buffer);
+                outputBuffers.push_back(std::move(buffer));
+            }
+        }
+    }
+    return outputBuffers;
 }
 
 // Get a TestBuffer with data copied from an IBuffer object.
-static void getBuffer(const sp<IBuffer>& buffer, size_t size, TestBuffer* testBuffer) {
+void ExecutionContextV1_3::getBuffer(const sp<IBuffer>& buffer, size_t size,
+                                     TestBuffer* testBuffer) const {
     // IBuffer -> Shared memory.
     hidl_memory tmp = nn::allocateSharedMemory(size);
     const auto ret = buffer->copyTo(tmp);
@@ -464,35 +510,6 @@
     *testBuffer = TestBuffer(size, outputPtr);
 }
 
-static std::vector<TestBuffer> getOutputBuffers(const TestModel& testModel, const Request& request,
-                                                const std::vector<sp<IBuffer>>& buffers) {
-    sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex].hidlMemory());
-    CHECK(outputMemory.get() != nullptr);
-    uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
-    CHECK(outputPtr != nullptr);
-
-    // Copy out output results.
-    std::vector<TestBuffer> outputBuffers;
-    for (uint32_t i = 0; i < request.outputs.size(); i++) {
-        const auto& outputLoc = request.outputs[i].location;
-        if (outputLoc.poolIndex == kOutputPoolIndex) {
-            outputBuffers.emplace_back(outputLoc.length, outputPtr + outputLoc.offset);
-        } else {
-            const auto& op = testModel.main.operands[testModel.main.outputIndexes[i]];
-            if (op.data.size() == 0) {
-                outputBuffers.emplace_back();
-            } else {
-                SCOPED_TRACE("Output index = " + std::to_string(i));
-                const uint32_t bufferIndex = outputLoc.poolIndex - kDeviceMemoryBeginIndex;
-                TestBuffer buffer;
-                getBuffer(buffers[bufferIndex], op.data.size(), &buffer);
-                outputBuffers.push_back(std::move(buffer));
-            }
-        }
-    }
-    return outputBuffers;
-}
-
 static bool hasZeroSizedOutput(const TestModel& testModel) {
     return std::any_of(testModel.main.outputIndexes.begin(), testModel.main.outputIndexes.end(),
                        [&testModel](uint32_t index) {
@@ -543,13 +560,14 @@
         return;
     }
 
-    auto [request, buffers] =
-            createRequest(device, preparedModel, testModel,
-                          /*preferDeviceMemory=*/testConfig.memoryType == MemoryType::DEVICE);
+    ExecutionContextV1_3 context(device, preparedModel);
+    auto maybeRequest = context.createRequest(testModel, testConfig.memoryType);
     // Skip if testing memory domain but no device memory has been allocated.
-    if (testConfig.memoryType == MemoryType::DEVICE && buffers.empty()) {
+    if (!maybeRequest.has_value()) {
         return;
     }
+
+    Request request = std::move(maybeRequest.value());
     if (testConfig.outputType == OutputType::INSUFFICIENT) {
         makeOutputInsufficientSize(/*outputIndex=*/0, &request);
     }
@@ -648,6 +666,7 @@
                 ASSERT_EQ(syncFenceHandle.getNativeHandle(), nullptr);
                 ASSERT_EQ(fencedCallback, nullptr);
                 executionStatus = result;
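+                // UINT64_MAX indicates that no timing information is available for this run.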
+                timing = {UINT64_MAX, UINT64_MAX};
             } else if (syncFenceHandle.getNativeHandle()) {
                 // If a sync fence is returned, try start another run waiting for the sync fence.
                 ret = preparedModel->executeFenced(request, {syncFenceHandle},
@@ -744,7 +763,7 @@
     }
 
     // Retrieve execution results.
-    const std::vector<TestBuffer> outputs = getOutputBuffers(testModel, request, buffers);
+    const std::vector<TestBuffer> outputs = context.getOutputBuffers(testModel, request);
 
     // We want "close-enough" results.
     checkResults(testModel, outputs);
@@ -755,29 +774,32 @@
     std::vector<OutputType> outputTypesList;
     std::vector<MeasureTiming> measureTimingList;
     std::vector<Executor> executorList;
-    MemoryType memoryType = MemoryType::SHARED;
+    std::vector<MemoryType> memoryTypeList;
 
     switch (testKind) {
         case TestKind::GENERAL: {
             outputTypesList = {OutputType::FULLY_SPECIFIED};
             measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
             executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST};
+            memoryTypeList = {MemoryType::ASHMEM};
         } break;
         case TestKind::DYNAMIC_SHAPE: {
             outputTypesList = {OutputType::UNSPECIFIED, OutputType::INSUFFICIENT};
             measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
             executorList = {Executor::ASYNC, Executor::SYNC, Executor::BURST, Executor::FENCED};
+            memoryTypeList = {MemoryType::ASHMEM};
         } break;
         case TestKind::MEMORY_DOMAIN: {
             outputTypesList = {OutputType::FULLY_SPECIFIED};
             measureTimingList = {MeasureTiming::NO};
             executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
-            memoryType = MemoryType::DEVICE;
+            memoryTypeList = {MemoryType::BLOB_AHWB, MemoryType::DEVICE};
         } break;
         case TestKind::FENCED_COMPUTE: {
             outputTypesList = {OutputType::FULLY_SPECIFIED};
             measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
             executorList = {Executor::FENCED};
+            memoryTypeList = {MemoryType::ASHMEM};
         } break;
         case TestKind::QUANTIZATION_COUPLING: {
             LOG(FATAL) << "Wrong TestKind for EvaluatePreparedModel";
@@ -788,14 +810,17 @@
             measureTimingList = {MeasureTiming::NO, MeasureTiming::YES};
             // Burst does not support V1_3 loop timeout.
             executorList = {Executor::ASYNC, Executor::SYNC, Executor::FENCED};
+            memoryTypeList = {MemoryType::ASHMEM};
         } break;
     }
 
     for (const OutputType outputType : outputTypesList) {
         for (const MeasureTiming measureTiming : measureTimingList) {
             for (const Executor executor : executorList) {
-                const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
-                EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+                for (const MemoryType memoryType : memoryTypeList) {
+                    const TestConfig testConfig(executor, measureTiming, outputType, memoryType);
+                    EvaluatePreparedModel(device, preparedModel, testModel, testConfig);
+                }
             }
         }
     }
@@ -814,7 +839,7 @@
     for (const OutputType outputType : outputTypesList) {
         for (const MeasureTiming measureTiming : measureTimingList) {
             for (const Executor executor : executorList) {
-                const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::SHARED,
+                const TestConfig testConfig(executor, measureTiming, outputType, MemoryType::ASHMEM,
                                             /*reportSkipping=*/false);
                 bool baseSkipped = false;
                 EvaluatePreparedModel(device, preparedModel, testModel, testConfig, &baseSkipped);
@@ -891,6 +916,10 @@
     return TestModelManager::get().getTestModels(filter);
 }
 
+std::vector<NamedModel> getNamedModels(const FilterNameFn& filter) {
+    return TestModelManager::get().getTestModels(filter);
+}
+
 std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info) {
     const auto& [namedDevice, namedModel] = info.param;
     return gtestCompliantName(getName(namedDevice) + "_" + getName(namedModel));
diff --git a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
index 834d335..4f05c48 100644
--- a/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
+++ b/neuralnetworks/1.3/vts/functional/GeneratedTestHarness.h
@@ -41,6 +41,9 @@
 using FilterFn = std::function<bool(const test_helper::TestModel&)>;
 std::vector<NamedModel> getNamedModels(const FilterFn& filter);
 
+using FilterNameFn = std::function<bool(const std::string&)>;
+std::vector<NamedModel> getNamedModels(const FilterNameFn& filter);
+
 std::string printGeneratedTest(const testing::TestParamInfo<GeneratedTestParam>& info);
 
 #define INSTANTIATE_GENERATED_TEST(TestSuite, filter)                                     \
@@ -77,6 +80,8 @@
 void EvaluatePreparedModel(const sp<IDevice>& device, const sp<IPreparedModel>& preparedModel,
                            const test_helper::TestModel& testModel, TestKind testKind);
 
+void waitForSyncFence(int syncFd);
+
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
 
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_GENERATED_TEST_HARNESS_H
diff --git a/neuralnetworks/1.3/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/1.3/vts/functional/MemoryDomainTests.cpp
new file mode 100644
index 0000000..3c0c885
--- /dev/null
+++ b/neuralnetworks/1.3/vts/functional/MemoryDomainTests.cpp
@@ -0,0 +1,1203 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "neuralnetworks_hidl_hal_test"
+
+#include <android-base/logging.h>
+#include <gtest/gtest.h>
+
+#include "1.3/Callbacks.h"
+#include "1.3/Utils.h"
+#include "GeneratedTestHarness.h"
+#include "MemoryUtils.h"
+#include "TestHarness.h"
+#include "Utils.h"
+#include "VtsHalNeuralnetworks.h"
+
+namespace android::hardware::neuralnetworks::V1_3::vts::functional {
+
+using namespace test_helper;
+using implementation::ExecutionCallback;
+using implementation::PreparedModelCallback;
+using V1_0::RequestArgument;
+using V1_1::ExecutionPreference;
+using V1_2::Constant;
+using V1_2::MeasureTiming;
+using V1_2::OutputShape;
+using V1_2::Timing;
+
+namespace {
+
+const auto kNamedDeviceChoices = testing::ValuesIn(getNamedDevices());
+
+// A 1.3 driver is likely to support at least one of the following operand types.
+const std::vector<TestOperandType> kTestOperandTypeChoicesVector = {
+        TestOperandType::TENSOR_FLOAT32,
+        TestOperandType::TENSOR_FLOAT16,
+        TestOperandType::TENSOR_QUANT8_ASYMM,
+        TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+};
+const auto kTestOperandTypeChoices = testing::ValuesIn(kTestOperandTypeChoicesVector);
+
+bool isInChoices(TestOperandType type) {
+    return std::count(kTestOperandTypeChoicesVector.begin(), kTestOperandTypeChoicesVector.end(),
+                      type) > 0;
+}
+
+bool isFloat(TestOperandType type) {
+    CHECK(isInChoices(type));
+    return type == TestOperandType::TENSOR_FLOAT32 || type == TestOperandType::TENSOR_FLOAT16;
+}
+
+// Create dummy buffers for model constants as well as inputs and outputs.
+// We only care about the size here because we will not check accuracy in validation tests.
+void createDummyData(TestModel* testModel) {
+    for (auto& operand : testModel->main.operands) {
+        if (operand.data != nullptr) continue;
+        switch (operand.lifetime) {
+            case TestOperandLifeTime::SUBGRAPH_INPUT:
+            case TestOperandLifeTime::SUBGRAPH_OUTPUT:
+            case TestOperandLifeTime::CONSTANT_COPY:
+            case TestOperandLifeTime::CONSTANT_REFERENCE: {
+                const uint32_t size = nn::nonExtensionOperandSizeOfData(
+                        static_cast<OperandType>(operand.type), operand.dimensions);
+                operand.data = TestBuffer(size);
+            } break;
+            default:
+                break;
+        }
+    }
+}
+
+TestOperand createInt32Scalar(int32_t value) {
+    return {
+            .type = TestOperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = TestOperandLifeTime::CONSTANT_COPY,
+            .data = TestBuffer::createFromVector<int32_t>({value}),
+    };
+}
+
+// Construct a test model with multiple CONV_2D operations, each using the given operand as its
+// input. The filter dimensions are chosen to ensure that the outputs have the same dimensions as
+// the inputs. We choose the CONV_2D operation because it is commonly supported by most drivers.
+TestModel createConvModel(const TestOperand& operand, uint32_t numOperations) {
+    CHECK(isInChoices(operand.type));
+
+    TestOperand weight = {.type = operand.type,
+                          .dimensions = {operand.dimensions[3], 3, 3, operand.dimensions[3]},
+                          .numberOfConsumers = 1,
+                          .scale = isFloat(operand.type) ? 0.0f : 1.0f,
+                          .zeroPoint = 0,
+                          .lifetime = TestOperandLifeTime::CONSTANT_COPY};
+
+    TestOperand bias = {
+            .type = isFloat(operand.type) ? operand.type : TestOperandType::TENSOR_INT32,
+            .dimensions = {operand.dimensions[3]},
+            .numberOfConsumers = 1,
+            .scale = operand.scale * weight.scale,
+            .zeroPoint = 0,
+            .lifetime = TestOperandLifeTime::CONSTANT_COPY};
+
+    TestOperand output = operand;
+    output.numberOfConsumers = 0;
+    output.lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT;
+
+    const std::vector<TestOperand> operands = {
+            operand,
+            std::move(weight),
+            std::move(bias),
+            createInt32Scalar(1),  // same padding
+            createInt32Scalar(1),  // width stride
+            createInt32Scalar(1),  // height stride
+            createInt32Scalar(0),  // activation = NONE
+            std::move(output),
+    };
+
+    TestModel model;
+    for (uint32_t i = 0; i < numOperations; i++) {
+        model.main.operands.insert(model.main.operands.end(), operands.begin(), operands.end());
+        const uint32_t inputIndex = operands.size() * i;
+        const uint32_t outputIndex = inputIndex + operands.size() - 1;
+        std::vector<uint32_t> inputs(operands.size() - 1);
+        std::iota(inputs.begin(), inputs.end(), inputIndex);
+        model.main.operations.push_back({.type = TestOperationType::CONV_2D,
+                                         .inputs = std::move(inputs),
+                                         .outputs = {outputIndex}});
+        model.main.inputIndexes.push_back(inputIndex);
+        model.main.outputIndexes.push_back(outputIndex);
+    }
+    createDummyData(&model);
+    return model;
+}
+
+// Construct a test model with a single ADD operation with the given operand as input0 and input1.
+// This covers additional cases that the CONV_2D model does not support, e.g. an arbitrary input
+// operand rank or a scalar input operand. We choose the ADD operation because it is commonly
+// supported by most drivers.
+TestModel createSingleAddModel(const TestOperand& operand) {
+    CHECK(isInChoices(operand.type));
+
+    TestOperand act = {
+            .type = TestOperandType::INT32,
+            .dimensions = {},
+            .numberOfConsumers = 1,
+            .scale = 0.0f,
+            .zeroPoint = 0,
+            .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
+    };
+
+    TestOperand output = operand;
+    output.numberOfConsumers = 0;
+    output.lifetime = TestOperandLifeTime::SUBGRAPH_OUTPUT;
+
+    TestModel model = {
+            .main =
+                    {
+                            .operands =
+                                    {
+                                            operand,
+                                            operand,
+                                            std::move(act),
+                                            output,
+                                    },
+                            .operations = {{.type = TestOperationType::ADD,
+                                            .inputs = {0, 1, 2},
+                                            .outputs = {3}}},
+                            .inputIndexes = {0, 1, 2},
+                            .outputIndexes = {3},
+                    },
+    };
+    createDummyData(&model);
+    return model;
+}
+
+// A dummy invalid IPreparedModel class for MemoryDomainAllocateTest.InvalidPreparedModel
+class InvalidPreparedModel : public IPreparedModel {
+  public:
+    Return<V1_0::ErrorStatus> execute(const V1_0::Request&,
+                                      const sp<V1_0::IExecutionCallback>&) override {
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
+    }
+    Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request&, V1_2::MeasureTiming,
+                                          const sp<V1_2::IExecutionCallback>&) override {
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
+    }
+    Return<V1_3::ErrorStatus> execute_1_3(const V1_3::Request&, V1_2::MeasureTiming,
+                                          const V1_3::OptionalTimePoint&,
+                                          const V1_3::OptionalTimeoutDuration&,
+                                          const sp<V1_3::IExecutionCallback>&) override {
+        return V1_3::ErrorStatus::GENERAL_FAILURE;
+    }
+    Return<void> executeSynchronously(const V1_0::Request&, V1_2::MeasureTiming,
+                                      executeSynchronously_cb) override {
+        return Void();
+    }
+    Return<void> executeSynchronously_1_3(const V1_3::Request&, V1_2::MeasureTiming,
+                                          const V1_3::OptionalTimePoint&,
+                                          const V1_3::OptionalTimeoutDuration&,
+                                          executeSynchronously_1_3_cb) override {
+        return Void();
+    }
+    Return<void> configureExecutionBurst(const sp<V1_2::IBurstCallback>&,
+                                         const MQDescriptorSync<V1_2::FmqRequestDatum>&,
+                                         const MQDescriptorSync<V1_2::FmqResultDatum>&,
+                                         configureExecutionBurst_cb) override {
+        return Void();
+    }
+    Return<void> executeFenced(const V1_3::Request&, const hidl_vec<hidl_handle>&,
+                               V1_2::MeasureTiming, const V1_3::OptionalTimePoint&,
+                               const V1_3::OptionalTimeoutDuration&,
+                               const V1_3::OptionalTimeoutDuration&, executeFenced_cb) override {
+        return Void();
+    }
+};
+
+}  // namespace
+
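+// Base fixture for memory domain tests: holds the device under test and the operand type being
+// exercised, and provides helpers that build small CONV_2D/ADD prepared models around it.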
+class MemoryDomainTestBase : public testing::Test {
+  protected:
+    MemoryDomainTestBase(sp<IDevice> device, TestOperandType type)
+        : kDevice(std::move(device)),
+          kTestOperandType(type),
+          kTestOperand(kTestOperandMap.at(type)),
+          kTestOperandDataSize(nn::nonExtensionOperandSizeOfData(static_cast<OperandType>(type),
+                                                                 kTestOperand.dimensions)) {}
+
+    void SetUp() override {
+        testing::Test::SetUp();
+        ASSERT_NE(kDevice, nullptr);
+    }
+
+    sp<IPreparedModel> createConvPreparedModel(const TestOperand& testOperand,
+                                               uint32_t numOperations = 1) {
+        const TestModel testModel = createConvModel(testOperand, numOperations);
+        const Model model = createModel(testModel);
+        sp<IPreparedModel> preparedModel;
+        createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);
+        return preparedModel;
+    }
+
+    sp<IPreparedModel> createAddPreparedModel(const TestOperand& testOperand) {
+        const TestModel testModel = createSingleAddModel(testOperand);
+        const Model model = createModel(testModel);
+        sp<IPreparedModel> preparedModel;
+        createPreparedModel(kDevice, model, &preparedModel, /*reportSkipping=*/false);
+        return preparedModel;
+    }
+
+    static const std::map<TestOperandType, TestOperand> kTestOperandMap;
+
+    const sp<IDevice> kDevice;
+    const TestOperandType kTestOperandType;
+    const TestOperand& kTestOperand;
+    const uint32_t kTestOperandDataSize;
+};
+
+const std::map<TestOperandType, TestOperand> MemoryDomainTestBase::kTestOperandMap = {
+        {TestOperandType::TENSOR_FLOAT32,
+         {
+                 .type = TestOperandType::TENSOR_FLOAT32,
+                 .dimensions = {1, 32, 32, 8},
+                 .numberOfConsumers = 1,
+                 .scale = 0.0f,
+                 .zeroPoint = 0,
+                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
+         }},
+        {TestOperandType::TENSOR_FLOAT16,
+         {
+                 .type = TestOperandType::TENSOR_FLOAT16,
+                 .dimensions = {1, 32, 32, 8},
+                 .numberOfConsumers = 1,
+                 .scale = 0.0f,
+                 .zeroPoint = 0,
+                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
+         }},
+        {TestOperandType::TENSOR_QUANT8_ASYMM,
+         {
+                 .type = TestOperandType::TENSOR_QUANT8_ASYMM,
+                 .dimensions = {1, 32, 32, 8},
+                 .numberOfConsumers = 1,
+                 .scale = 0.5f,
+                 .zeroPoint = 0,
+                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
+         }},
+        {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+         {
+                 .type = TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+                 .dimensions = {1, 32, 32, 8},
+                 .numberOfConsumers = 1,
+                 .scale = 0.5f,
+                 .zeroPoint = 0,
+                 .lifetime = TestOperandLifeTime::SUBGRAPH_INPUT,
+         }},
+};
+
+using MemoryDomainAllocateTestParam = std::tuple<NamedDevice, TestOperandType>;
+class MemoryDomainAllocateTest : public MemoryDomainTestBase,
+                                 public testing::WithParamInterface<MemoryDomainAllocateTestParam> {
+  protected:
+    MemoryDomainAllocateTest()
+        : MemoryDomainTestBase(getData(std::get<NamedDevice>(GetParam())),
+                               std::get<TestOperandType>(GetParam())) {}
+
+    struct AllocateTestArgs {
+        hidl_vec<uint32_t> dimensions;
+        hidl_vec<sp<IPreparedModel>> preparedModels;
+        hidl_vec<BufferRole> inputRoles;
+        hidl_vec<BufferRole> outputRoles;
+    };
+
+    // Validation test for IDevice::allocate. The driver is expected to fail with INVALID_ARGUMENT,
+    // or with GENERAL_FAILURE if memory domains are not supported.
+    void validateAllocate(AllocateTestArgs args) {
+        const auto ret = kDevice->allocate(
+                {.dimensions = std::move(args.dimensions)}, std::move(args.preparedModels),
+                std::move(args.inputRoles), std::move(args.outputRoles),
+                [](ErrorStatus status, const sp<IBuffer>& buffer, uint32_t token) {
+                    EXPECT_TRUE(status == ErrorStatus::INVALID_ARGUMENT ||
+                                status == ErrorStatus::GENERAL_FAILURE);
+                    EXPECT_EQ(buffer, nullptr);
+                    EXPECT_EQ(token, 0);
+                });
+        ASSERT_TRUE(ret.isOk());
+    }
+
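+    // Requests allocations that pair roles from the two (incompatible) prepared models; every
+    // combination is expected to be rejected by validateAllocate.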
+    void testConflictOperands(const sp<IPreparedModel>& model1, const sp<IPreparedModel>& model2) {
+        validateAllocate({
+                .preparedModels = {model1, model2},
+                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
+                               {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+        });
+        validateAllocate({
+                .preparedModels = {model1, model2},
+                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+                .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+        });
+        validateAllocate({
+                .preparedModels = {model1, model2},
+                .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
+                                {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+        });
+    }
+};
+
+TEST_P(MemoryDomainAllocateTest, EmptyRole) {
+    // Test with empty prepared models and roles.
+    validateAllocate({});
+
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    if (preparedModel == nullptr) return;
+
+    // Test again with non-empty prepared models but empty roles.
+    validateAllocate({
+            .preparedModels = {preparedModel},
+    });
+}
+
+TEST_P(MemoryDomainAllocateTest, NullptrPreparedModel) {
+    // Test with nullptr prepared model as input role.
+    validateAllocate({
+            .preparedModels = {nullptr},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+    });
+
+    // Test with nullptr prepared model as output role.
+    validateAllocate({
+            .preparedModels = {nullptr},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+    });
+}
+
+TEST_P(MemoryDomainAllocateTest, InvalidPreparedModel) {
+    sp<InvalidPreparedModel> invalidPreparedModel = new InvalidPreparedModel();
+
+    // Test with invalid prepared model as input role.
+    validateAllocate({
+            .preparedModels = {invalidPreparedModel},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+    });
+
+    // Test with invalid prepared model as output role.
+    validateAllocate({
+            .preparedModels = {invalidPreparedModel},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+    });
+}
+
+TEST_P(MemoryDomainAllocateTest, InvalidModelIndex) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    if (preparedModel == nullptr) return;
+
+    // This should fail because the model index is out of bounds.
+    validateAllocate({
+            .preparedModels = {preparedModel},
+            .inputRoles = {{.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+    });
+
+    // This should fail because the model index is out of bounds.
+    validateAllocate({
+            .preparedModels = {preparedModel},
+            .outputRoles = {{.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+    });
+}
+
+TEST_P(MemoryDomainAllocateTest, InvalidIOIndex) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    if (preparedModel == nullptr) return;
+
+    // This should fail, because the model only has one input.
+    validateAllocate({
+            .preparedModels = {preparedModel},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 1, .frequency = 1.0f}},
+    });
+
+    // This should fail, because the model only has one output.
+    validateAllocate({
+            .preparedModels = {preparedModel},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 1, .frequency = 1.0f}},
+    });
+}
+
+TEST_P(MemoryDomainAllocateTest, InvalidFrequency) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    if (preparedModel == nullptr) return;
+
+    for (float invalidFreq : {10.0f, 0.0f, -0.5f}) {
+        // Test with invalid frequency for input roles.
+        validateAllocate({
+                .preparedModels = {preparedModel},
+                .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = invalidFreq}},
+        });
+        // Test with invalid frequency for output roles.
+        validateAllocate({
+                .preparedModels = {preparedModel},
+                .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = invalidFreq}},
+        });
+    }
+}
+
+TEST_P(MemoryDomainAllocateTest, SameRoleSpecifiedTwice) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    if (preparedModel == nullptr) return;
+
+    // Same role with same model index.
+    validateAllocate({
+            .preparedModels = {preparedModel},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
+                           {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+    });
+    validateAllocate({
+            .preparedModels = {preparedModel},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
+                            {.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+    });
+
+    // Different model indexes, but logically referring to the same role.
+    validateAllocate({
+            .preparedModels = {preparedModel, preparedModel},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
+                           {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+    });
+    validateAllocate({
+            .preparedModels = {preparedModel, preparedModel},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f},
+                            {.modelIndex = 1, .ioIndex = 0, .frequency = 1.0f}},
+    });
+}
+
+TEST_P(MemoryDomainAllocateTest, ConflictOperandType) {
+    const std::map<TestOperandType, TestOperandType> conflictTypeMap = {
+            {TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_FLOAT16},
+            {TestOperandType::TENSOR_FLOAT16, TestOperandType::TENSOR_FLOAT32},
+            {TestOperandType::TENSOR_QUANT8_ASYMM, TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED},
+            {TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, TestOperandType::TENSOR_QUANT8_ASYMM},
+    };
+
+    TestOperand conflictTestOperand = kTestOperand;
+    const auto it = conflictTypeMap.find(kTestOperandType);
+    ASSERT_FALSE(it == conflictTypeMap.end());
+    conflictTestOperand.type = it->second;
+
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto conflictPreparedModel = createConvPreparedModel(conflictTestOperand);
+    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
+    testConflictOperands(preparedModel, conflictPreparedModel);
+}
+
+TEST_P(MemoryDomainAllocateTest, ConflictScale) {
+    if (isFloat(kTestOperandType)) return;
+
+    TestOperand conflictTestOperand = kTestOperand;
+    ASSERT_NE(conflictTestOperand.scale, 1.0f);
+    conflictTestOperand.scale = 1.0f;
+
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto conflictPreparedModel = createConvPreparedModel(conflictTestOperand);
+    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
+    testConflictOperands(preparedModel, conflictPreparedModel);
+}
+
+TEST_P(MemoryDomainAllocateTest, ConflictZeroPoint) {
+    if (isFloat(kTestOperandType)) return;
+
+    TestOperand conflictTestOperand = kTestOperand;
+    ASSERT_NE(conflictTestOperand.zeroPoint, 10);
+    conflictTestOperand.zeroPoint = 10;
+
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto conflictPreparedModel = createConvPreparedModel(conflictTestOperand);
+    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
+    testConflictOperands(preparedModel, conflictPreparedModel);
+}
+
+TEST_P(MemoryDomainAllocateTest, ConflictRankBetweenRoles) {
+    TestOperand conflictTestOperand = kTestOperand;
+    conflictTestOperand.dimensions.pop_back();
+
+    auto preparedModel = createAddPreparedModel(kTestOperand);
+    auto conflictPreparedModel = createAddPreparedModel(conflictTestOperand);
+    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
+    testConflictOperands(preparedModel, conflictPreparedModel);
+}
+
+TEST_P(MemoryDomainAllocateTest, ConflictDimensionsBetweenRoles) {
+    TestOperand conflictTestOperand = kTestOperand;
+    conflictTestOperand.dimensions[0] = 4;
+
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto conflictPreparedModel = createConvPreparedModel(conflictTestOperand);
+    if (preparedModel == nullptr || conflictPreparedModel == nullptr) return;
+    testConflictOperands(preparedModel, conflictPreparedModel);
+}
+
+TEST_P(MemoryDomainAllocateTest, ConflictRankBetweenRoleAndDesc) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    if (preparedModel == nullptr) return;
+
+    auto badDimensions = kTestOperand.dimensions;
+    badDimensions.pop_back();
+
+    validateAllocate({
+            .dimensions = badDimensions,
+            .preparedModels = {preparedModel},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+    });
+    validateAllocate({
+            .dimensions = badDimensions,
+            .preparedModels = {preparedModel},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+    });
+}
+
+TEST_P(MemoryDomainAllocateTest, ConflictDimensionsBetweenRoleAndDesc) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    if (preparedModel == nullptr) return;
+
+    auto badDimensions = kTestOperand.dimensions;
+    badDimensions[0] = 4;
+
+    validateAllocate({
+            .dimensions = badDimensions,
+            .preparedModels = {preparedModel},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+    });
+    validateAllocate({
+            .dimensions = badDimensions,
+            .preparedModels = {preparedModel},
+            .outputRoles = {{.modelIndex = 0, .ioIndex = 0, .frequency = 1.0f}},
+    });
+}
+
+TEST_P(MemoryDomainAllocateTest, ConflictRankWithScalarRole) {
+    auto preparedModel = createAddPreparedModel(kTestOperand);
+    if (preparedModel == nullptr) return;
+
+    // This should fail because the target operand is a scalar but non-empty dimensions are
+    // specified.
+    validateAllocate({
+            .dimensions = {1},
+            .preparedModels = {preparedModel},
+            .inputRoles = {{.modelIndex = 0, .ioIndex = 2, .frequency = 1.0f}},
+    });
+}
+
+std::string printMemoryDomainAllocateTest(
+        const testing::TestParamInfo<MemoryDomainAllocateTestParam>& info) {
+    const auto& [namedDevice, operandType] = info.param;
+    const std::string type = toString(static_cast<OperandType>(operandType));
+    return gtestCompliantName(getName(namedDevice) + "_" + type);
+}
+
+INSTANTIATE_TEST_CASE_P(TestMemoryDomain, MemoryDomainAllocateTest,
+                        testing::Combine(kNamedDeviceChoices, kTestOperandTypeChoices),
+                        printMemoryDomainAllocateTest);
+
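+// Adds helpers for allocating device memory buffers and for exercising IBuffer::copyFrom and
+// IBuffer::copyTo against expected error statuses.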
+class MemoryDomainCopyTestBase : public MemoryDomainTestBase {
+  protected:
+    MemoryDomainCopyTestBase(sp<IDevice> device, TestOperandType type)
+        : MemoryDomainTestBase(std::move(device), type) {}
+
+    // Allocates device memory for roles of a single prepared model.
+    // Returns {IBuffer, token} if success; returns {nullptr, 0} if not supported.
+    std::pair<sp<IBuffer>, uint32_t> allocateBuffer(const sp<IPreparedModel>& preparedModel,
+                                                    const std::vector<uint32_t>& inputIndexes,
+                                                    const std::vector<uint32_t>& outputIndexes,
+                                                    const std::vector<uint32_t>& dimensions) {
+        if (preparedModel == nullptr) {
+            return {nullptr, 0};
+        }
+
+        hidl_vec<BufferRole> inputRoles(inputIndexes.size()), outputRoles(outputIndexes.size());
+        auto trans = [](uint32_t ind) -> BufferRole {
+            return {.modelIndex = 0, .ioIndex = ind, .frequency = 1.0f};
+        };
+        std::transform(inputIndexes.begin(), inputIndexes.end(), inputRoles.begin(), trans);
+        std::transform(outputIndexes.begin(), outputIndexes.end(), outputRoles.begin(), trans);
+
+        sp<IBuffer> buffer;
+        uint32_t token = 0;
+        const auto ret = kDevice->allocate(
+                {.dimensions = dimensions}, {preparedModel}, std::move(inputRoles),
+                std::move(outputRoles),
+                [&buffer, &token](ErrorStatus err, const sp<IBuffer>& buf, uint32_t tok) {
+                    if (err == ErrorStatus::NONE) {
+                        EXPECT_NE(buf, nullptr);
+                        EXPECT_GT(tok, 0);
+                        buffer = buf;
+                        token = tok;
+                    } else {
+                        EXPECT_EQ(err, ErrorStatus::GENERAL_FAILURE);
+                        EXPECT_EQ(buf, nullptr);
+                        EXPECT_EQ(tok, 0);
+                    }
+                });
+        EXPECT_TRUE(ret.isOk());
+        return {std::move(buffer), token};
+    }
+
+    std::pair<sp<IBuffer>, uint32_t> allocateBuffer(const sp<IPreparedModel>& preparedModel,
+                                                    const std::vector<uint32_t>& inputIndexes,
+                                                    const std::vector<uint32_t>& outputIndexes) {
+        return allocateBuffer(preparedModel, inputIndexes, outputIndexes, {});
+    }
+
+    hidl_memory allocateSharedMemory(uint32_t size) {
+        hidl_memory memory = nn::allocateSharedMemory(size);
+        EXPECT_EQ(memory.size(), size);
+        return memory;
+    }
+
+    void testCopyFrom(const sp<IBuffer>& buffer, const hidl_memory& memory,
+                      const std::vector<uint32_t>& dimensions, ErrorStatus expectedStatus) {
+        const auto ret = buffer->copyFrom(memory, dimensions);
+        ASSERT_TRUE(ret.isOk());
+        ASSERT_EQ(static_cast<ErrorStatus>(ret), expectedStatus);
+    }
+
+    void testCopyTo(const sp<IBuffer>& buffer, const hidl_memory& memory,
+                    ErrorStatus expectedStatus) {
+        const auto ret = buffer->copyTo(memory);
+        ASSERT_TRUE(ret.isOk());
+        ASSERT_EQ(static_cast<ErrorStatus>(ret), expectedStatus);
+    }
+
+    void initializeDeviceMemory(const sp<IBuffer>& buffer) {
+        hidl_memory memory = nn::allocateSharedMemory(kTestOperandDataSize);
+        ASSERT_EQ(memory.size(), kTestOperandDataSize);
+        testCopyFrom(buffer, memory, kTestOperand.dimensions, ErrorStatus::NONE);
+    }
+};
+
+using MemoryDomainCopyTestParam = std::tuple<NamedDevice, TestOperandType>;
+class MemoryDomainCopyTest : public MemoryDomainCopyTestBase,
+                             public testing::WithParamInterface<MemoryDomainCopyTestParam> {
+  protected:
+    MemoryDomainCopyTest()
+        : MemoryDomainCopyTestBase(getData(std::get<NamedDevice>(GetParam())),
+                                   std::get<TestOperandType>(GetParam())) {}
+};
+
+TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidMemorySize) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
+    if (buffer == nullptr) return;
+
+    uint32_t badMemorySize1 = kTestOperandDataSize / 2, badMemorySize2 = kTestOperandDataSize * 2;
+    hidl_memory badMemory1 = allocateSharedMemory(badMemorySize1);
+    hidl_memory badMemory2 = allocateSharedMemory(badMemorySize2);
+    testCopyFrom(buffer, badMemory1, {}, ErrorStatus::INVALID_ARGUMENT);
+    testCopyFrom(buffer, badMemory2, {}, ErrorStatus::INVALID_ARGUMENT);
+}
+
+TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidMemorySize_DynamicShape) {
+    TestOperand testOperand = kTestOperand;
+    testOperand.dimensions[0] = 0;
+    auto preparedModel = createConvPreparedModel(testOperand);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
+    if (buffer == nullptr) return;
+
+    uint32_t badMemorySize1 = kTestOperandDataSize / 2, badMemorySize2 = kTestOperandDataSize * 2;
+    hidl_memory badMemory1 = allocateSharedMemory(badMemorySize1);
+    hidl_memory badMemory2 = allocateSharedMemory(badMemorySize2);
+    hidl_memory goodMemory = allocateSharedMemory(kTestOperandDataSize);
+
+    auto badDimensions = kTestOperand.dimensions;
+    badDimensions[0] = 2;
+
+    testCopyFrom(buffer, badMemory1, kTestOperand.dimensions, ErrorStatus::INVALID_ARGUMENT);
+    testCopyFrom(buffer, badMemory2, kTestOperand.dimensions, ErrorStatus::INVALID_ARGUMENT);
+    testCopyFrom(buffer, goodMemory, kTestOperand.dimensions, ErrorStatus::NONE);
+    testCopyFrom(buffer, goodMemory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
+}
+
+TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidDimensions) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
+    if (buffer == nullptr) return;
+
+    hidl_memory memory = allocateSharedMemory(kTestOperandDataSize);
+
+    std::vector<uint32_t> badDimensions;
+    badDimensions = kTestOperand.dimensions;
+    badDimensions.pop_back();
+    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
+
+    badDimensions = kTestOperand.dimensions;
+    badDimensions[0] = 2;
+    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
+
+    badDimensions = kTestOperand.dimensions;
+    badDimensions[0] = 0;
+    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
+
+    testCopyFrom(buffer, memory, {}, ErrorStatus::NONE);
+    testCopyFrom(buffer, memory, kTestOperand.dimensions, ErrorStatus::NONE);
+}
+
+TEST_P(MemoryDomainCopyTest, CopyFrom_InvalidDimensions_DynamicShape) {
+    TestOperand testOperand = kTestOperand;
+    testOperand.dimensions[0] = 0;
+    auto preparedModel = createConvPreparedModel(testOperand);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
+    if (buffer == nullptr) return;
+
+    hidl_memory memory = allocateSharedMemory(kTestOperandDataSize);
+
+    std::vector<uint32_t> badDimensions;
+    badDimensions = kTestOperand.dimensions;
+    badDimensions.pop_back();
+    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
+
+    badDimensions = kTestOperand.dimensions;
+    badDimensions[0] = 2;
+    badDimensions[3] = 4;
+    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
+
+    badDimensions = kTestOperand.dimensions;
+    badDimensions[0] = 1;
+    badDimensions[3] = 0;
+    testCopyFrom(buffer, memory, badDimensions, ErrorStatus::INVALID_ARGUMENT);
+
+    testCopyFrom(buffer, memory, {}, ErrorStatus::INVALID_ARGUMENT);
+    testCopyFrom(buffer, memory, kTestOperand.dimensions, ErrorStatus::NONE);
+}
+
+TEST_P(MemoryDomainCopyTest, CopyTo_UninitializedMemory) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
+    if (buffer == nullptr) return;
+
+    hidl_memory memory = allocateSharedMemory(kTestOperandDataSize);
+    testCopyTo(buffer, memory, ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST_P(MemoryDomainCopyTest, CopyTo_InvalidMemorySize) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
+    if (buffer == nullptr) return;
+
+    uint32_t badMemorySize1 = kTestOperandDataSize / 2, badMemorySize2 = kTestOperandDataSize * 2;
+    hidl_memory badMemory1 = allocateSharedMemory(badMemorySize1);
+    hidl_memory badMemory2 = allocateSharedMemory(badMemorySize2);
+    hidl_memory goodMemory = allocateSharedMemory(kTestOperandDataSize);
+
+    initializeDeviceMemory(buffer);
+    testCopyTo(buffer, badMemory1, ErrorStatus::INVALID_ARGUMENT);
+    testCopyTo(buffer, badMemory2, ErrorStatus::INVALID_ARGUMENT);
+    testCopyTo(buffer, goodMemory, ErrorStatus::NONE);
+}
+
+TEST_P(MemoryDomainCopyTest, CopyTo_InvalidMemorySize_DynamicShape) {
+    TestOperand testOperand = kTestOperand;
+    testOperand.dimensions[0] = 0;
+    auto preparedModel = createConvPreparedModel(testOperand);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
+    if (buffer == nullptr) return;
+
+    uint32_t badMemorySize1 = kTestOperandDataSize / 2, badMemorySize2 = kTestOperandDataSize * 2;
+    hidl_memory badMemory1 = allocateSharedMemory(badMemorySize1);
+    hidl_memory badMemory2 = allocateSharedMemory(badMemorySize2);
+    hidl_memory goodMemory = allocateSharedMemory(kTestOperandDataSize);
+
+    initializeDeviceMemory(buffer);
+    testCopyTo(buffer, badMemory1, ErrorStatus::INVALID_ARGUMENT);
+    testCopyTo(buffer, badMemory2, ErrorStatus::INVALID_ARGUMENT);
+    testCopyTo(buffer, goodMemory, ErrorStatus::NONE);
+}
+
+std::string printMemoryDomainCopyTest(
+        const testing::TestParamInfo<MemoryDomainCopyTestParam>& info) {
+    const auto& [namedDevice, operandType] = info.param;
+    const std::string type = toString(static_cast<OperandType>(operandType));
+    return gtestCompliantName(getName(namedDevice) + "_" + type);
+}
+
+INSTANTIATE_TEST_CASE_P(TestMemoryDomain, MemoryDomainCopyTest,
+                        testing::Combine(kNamedDeviceChoices, kTestOperandTypeChoices),
+                        printMemoryDomainCopyTest);
+
+using MemoryDomainExecutionTestParam = std::tuple<NamedDevice, TestOperandType, Executor>;
+class MemoryDomainExecutionTest
+    : public MemoryDomainCopyTestBase,
+      public testing::WithParamInterface<MemoryDomainExecutionTestParam> {
+  protected:
+    MemoryDomainExecutionTest()
+        : MemoryDomainCopyTestBase(getData(std::get<NamedDevice>(GetParam())),
+                                   std::get<TestOperandType>(GetParam())) {}
+
+    Request::MemoryPool createSharedMemoryPool(uint32_t size) {
+        hidl_memory memory = allocateSharedMemory(size);
+        Request::MemoryPool pool;
+        pool.hidlMemory(memory);
+        return pool;
+    }
+
+    Request::MemoryPool createDeviceMemoryPool(uint32_t token) {
+        Request::MemoryPool pool;
+        pool.token(token);
+        return pool;
+    }
+
+    void testExecution(const sp<IPreparedModel>& preparedModel, const Request& request,
+                       ErrorStatus expectedStatus) {
+        switch (kExecutor) {
+            case Executor::ASYNC:
+                EXPECT_EQ(executeAsync(preparedModel, request), expectedStatus);
+                break;
+            case Executor::SYNC:
+                EXPECT_EQ(executeSync(preparedModel, request), expectedStatus);
+                break;
+            case Executor::FENCED:
+                EXPECT_EQ(executeFenced(preparedModel, request), expectedStatus);
+                break;
+            default:
+                ASSERT_TRUE(false);
+        }
+    }
+
+    ErrorStatus executeAsync(const sp<IPreparedModel>& preparedModel, const Request& request) {
+        ErrorStatus executionStatus;
+
+        // launch execution
+        sp<ExecutionCallback> executionCallback = new ExecutionCallback();
+        const auto ret =
+                preparedModel->execute_1_3(request, MeasureTiming::NO, {}, {}, executionCallback);
+        EXPECT_TRUE(ret.isOk());
+        executionStatus = static_cast<ErrorStatus>(ret);
+
+        // retrieve execution status
+        executionCallback->wait();
+        if (executionStatus == ErrorStatus::NONE) {
+            executionStatus = executionCallback->getStatus();
+        } else {
+            EXPECT_EQ(executionStatus, executionCallback->getStatus());
+        }
+        const auto timing = executionCallback->getTiming();
+        EXPECT_EQ(UINT64_MAX, timing.timeOnDevice);
+        EXPECT_EQ(UINT64_MAX, timing.timeInDriver);
+        if (executionStatus != ErrorStatus::NONE) {
+            EXPECT_EQ(executionCallback->getOutputShapes().size(), 0);
+        }
+        return executionStatus;
+    }
+
+    ErrorStatus executeSync(const sp<IPreparedModel>& preparedModel, const Request& request) {
+        ErrorStatus executionStatus;
+        const auto ret = preparedModel->executeSynchronously_1_3(
+                request, MeasureTiming::NO, {}, {},
+                [&executionStatus](ErrorStatus error, const hidl_vec<OutputShape>& shapes,
+                                   const Timing& time) {
+                    executionStatus = error;
+                    EXPECT_EQ(UINT64_MAX, time.timeOnDevice);
+                    EXPECT_EQ(UINT64_MAX, time.timeInDriver);
+                    if (executionStatus != ErrorStatus::NONE) {
+                        EXPECT_EQ(shapes.size(), 0);
+                    }
+                });
+        EXPECT_TRUE(ret.isOk());
+        return executionStatus;
+    }
+
+    ErrorStatus executeFenced(const sp<IPreparedModel>& preparedModel, const Request& request) {
+        ErrorStatus executionStatus;
+        hidl_handle syncFenceHandle;
+        sp<IFencedExecutionCallback> fencedCallback;
+        const auto callbackFunc = [&executionStatus, &syncFenceHandle, &fencedCallback](
+                                          ErrorStatus error, const hidl_handle& handle,
+                                          const sp<IFencedExecutionCallback>& callback) {
+            executionStatus = error;
+            syncFenceHandle = handle;
+            fencedCallback = callback;
+        };
+        Return<void> ret = preparedModel->executeFenced(request, {}, MeasureTiming::NO, {}, {}, {},
+                                                        callbackFunc);
+        EXPECT_TRUE(ret.isOk());
+        if (executionStatus != ErrorStatus::NONE) {
+            EXPECT_EQ(syncFenceHandle.getNativeHandle(), nullptr);
+            EXPECT_EQ(fencedCallback, nullptr);
+            return executionStatus;
+        }
+        if (syncFenceHandle.getNativeHandle()) {
+            waitForSyncFence(syncFenceHandle.getNativeHandle()->data[0]);
+        }
+        EXPECT_NE(fencedCallback, nullptr);
+        ret = fencedCallback->getExecutionInfo(
+                [&executionStatus](ErrorStatus error, Timing t, Timing) {
+                    executionStatus = error;
+                    EXPECT_EQ(UINT64_MAX, t.timeOnDevice);
+                    EXPECT_EQ(UINT64_MAX, t.timeInDriver);
+                });
+        EXPECT_TRUE(ret.isOk());
+        return executionStatus;
+    }
+
+    const Executor kExecutor = std::get<Executor>(GetParam());
+};
+
+TEST_P(MemoryDomainExecutionTest, InvalidToken) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    if (preparedModel == nullptr) return;
+
+    Request::MemoryPool sharedMemory = createSharedMemoryPool(kTestOperandDataSize);
+    Request::MemoryPool badDeviceMemory1 = createDeviceMemoryPool(0);    // Invalid token.
+    Request::MemoryPool badDeviceMemory2 = createDeviceMemoryPool(100);  // Unknown token.
+    RequestArgument sharedMemoryArg = {
+            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
+    RequestArgument deviceMemoryArg = {.location = {.poolIndex = 1}};
+
+    testExecution(preparedModel,
+                  {.inputs = {deviceMemoryArg},
+                   .outputs = {sharedMemoryArg},
+                   .pools = {sharedMemory, badDeviceMemory1}},
+                  ErrorStatus::INVALID_ARGUMENT);
+    testExecution(preparedModel,
+                  {.inputs = {deviceMemoryArg},
+                   .outputs = {sharedMemoryArg},
+                   .pools = {sharedMemory, badDeviceMemory2}},
+                  ErrorStatus::INVALID_ARGUMENT);
+    testExecution(preparedModel,
+                  {.inputs = {sharedMemoryArg},
+                   .outputs = {deviceMemoryArg},
+                   .pools = {sharedMemory, badDeviceMemory1}},
+                  ErrorStatus::INVALID_ARGUMENT);
+    testExecution(preparedModel,
+                  {.inputs = {sharedMemoryArg},
+                   .outputs = {deviceMemoryArg},
+                   .pools = {sharedMemory, badDeviceMemory2}},
+                  ErrorStatus::INVALID_ARGUMENT);
+}
+
+TEST_P(MemoryDomainExecutionTest, InvalidPreparedModel) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
+    if (buffer == nullptr) return;
+    auto badPreparedModel = createConvPreparedModel(kTestOperand);
+    if (badPreparedModel == nullptr) return;
+
+    Request::MemoryPool sharedMemory = createSharedMemoryPool(kTestOperandDataSize);
+    Request::MemoryPool deviceMemory = createDeviceMemoryPool(token);
+    RequestArgument sharedMemoryArg = {
+            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
+    RequestArgument deviceMemoryArg = {.location = {.poolIndex = 1}};
+
+    // This should fail, because the buffer is not allocated for badPreparedModel.
+    initializeDeviceMemory(buffer);
+    testExecution(badPreparedModel,
+                  {.inputs = {deviceMemoryArg},
+                   .outputs = {sharedMemoryArg},
+                   .pools = {sharedMemory, deviceMemory}},
+                  ErrorStatus::INVALID_ARGUMENT);
+    testExecution(badPreparedModel,
+                  {.inputs = {sharedMemoryArg},
+                   .outputs = {deviceMemoryArg},
+                   .pools = {sharedMemory, deviceMemory}},
+                  ErrorStatus::INVALID_ARGUMENT);
+}
+
+TEST_P(MemoryDomainExecutionTest, InvalidIOIndex) {
+    auto preparedModel = createConvPreparedModel(kTestOperand, 2);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {});
+    if (buffer == nullptr) return;
+
+    Request::MemoryPool sharedMemory1 = createSharedMemoryPool(kTestOperandDataSize);
+    Request::MemoryPool sharedMemory2 = createSharedMemoryPool(kTestOperandDataSize);
+    Request::MemoryPool sharedMemory3 = createSharedMemoryPool(kTestOperandDataSize);
+    Request::MemoryPool deviceMemory = createDeviceMemoryPool(token);
+    RequestArgument sharedMemoryArg1 = {
+            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
+    RequestArgument sharedMemoryArg2 = {
+            .location = {.poolIndex = 1, .offset = 0, .length = kTestOperandDataSize}};
+    RequestArgument sharedMemoryArg3 = {
+            .location = {.poolIndex = 2, .offset = 0, .length = kTestOperandDataSize}};
+    RequestArgument deviceMemoryArg = {.location = {.poolIndex = 3}};
+
+    // This should fail, because the device memory is not allocated for input 1.
+    initializeDeviceMemory(buffer);
+    testExecution(preparedModel,
+                  {.inputs = {sharedMemoryArg1, deviceMemoryArg},
+                   .outputs = {sharedMemoryArg2, sharedMemoryArg3},
+                   .pools = {sharedMemory1, sharedMemory2, sharedMemory3, deviceMemory}},
+                  ErrorStatus::INVALID_ARGUMENT);
+
+    // This should fail, because the device memory is not allocated for output 1.
+    testExecution(preparedModel,
+                  {.inputs = {sharedMemoryArg1, sharedMemoryArg2},
+                   .outputs = {sharedMemoryArg3, deviceMemoryArg},
+                   .pools = {sharedMemory1, sharedMemory2, sharedMemory3, deviceMemory}},
+                  ErrorStatus::INVALID_ARGUMENT);
+}
+
+TEST_P(MemoryDomainExecutionTest, InvalidIOType) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto [inputBuffer, inputToken] = allocateBuffer(preparedModel, {0}, {});
+    auto [outputBuffer, outputToken] = allocateBuffer(preparedModel, {}, {0});
+    if (inputBuffer == nullptr || outputBuffer == nullptr) return;
+
+    Request::MemoryPool sharedMemory = createSharedMemoryPool(kTestOperandDataSize);
+    Request::MemoryPool deviceMemory = createDeviceMemoryPool(inputToken);
+    RequestArgument sharedMemoryArg = {
+            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
+    RequestArgument deviceMemoryArg = {.location = {.poolIndex = 1}};
+
+    // This should fail, because the device memory is allocated for input but used as output.
+    testExecution(preparedModel,
+                  {.inputs = {sharedMemoryArg},
+                   .outputs = {deviceMemoryArg},
+                   .pools = {sharedMemory, deviceMemory}},
+                  ErrorStatus::INVALID_ARGUMENT);
+
+    // This should fail, because the device memory is allocated for output but used as input.
+    deviceMemory.token(outputToken);
+    initializeDeviceMemory(outputBuffer);
+    testExecution(preparedModel,
+                  {.inputs = {deviceMemoryArg},
+                   .outputs = {sharedMemoryArg},
+                   .pools = {sharedMemory, deviceMemory}},
+                  ErrorStatus::INVALID_ARGUMENT);
+}
+
+TEST_P(MemoryDomainExecutionTest, UninitializedMemory) {
+    auto preparedModel = createConvPreparedModel(kTestOperand);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0});
+    if (buffer == nullptr) return;
+
+    Request::MemoryPool sharedMemory = createSharedMemoryPool(kTestOperandDataSize);
+    Request::MemoryPool deviceMemory = createDeviceMemoryPool(token);
+    RequestArgument sharedMemoryArg = {
+            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
+    RequestArgument deviceMemoryArg = {.location = {.poolIndex = 1}};
+
+    // This should fail, because the device memory is not initialized.
+    testExecution(preparedModel,
+                  {.inputs = {deviceMemoryArg},
+                   .outputs = {sharedMemoryArg},
+                   .pools = {sharedMemory, deviceMemory}},
+                  ErrorStatus::GENERAL_FAILURE);
+
+    // This should initialize the device memory.
+    testExecution(preparedModel,
+                  {.inputs = {sharedMemoryArg},
+                   .outputs = {deviceMemoryArg},
+                   .pools = {sharedMemory, deviceMemory}},
+                  ErrorStatus::NONE);
+
+    // Test again with initialized device memory.
+    testExecution(preparedModel,
+                  {.inputs = {deviceMemoryArg},
+                   .outputs = {sharedMemoryArg},
+                   .pools = {sharedMemory, deviceMemory}},
+                  ErrorStatus::NONE);
+}
+
+TEST_P(MemoryDomainExecutionTest, SameRequestMultipleRoles) {
+    auto preparedModel = createConvPreparedModel(kTestOperand, 2);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0, 1}, {0, 1});
+    if (buffer == nullptr) return;
+
+    Request::MemoryPool sharedMemory1 = createSharedMemoryPool(kTestOperandDataSize);
+    Request::MemoryPool sharedMemory2 = createSharedMemoryPool(kTestOperandDataSize);
+    Request::MemoryPool deviceMemory = createDeviceMemoryPool(token);
+    RequestArgument sharedMemoryArg1 = {
+            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize}};
+    RequestArgument sharedMemoryArg2 = {
+            .location = {.poolIndex = 1, .offset = 0, .length = kTestOperandDataSize}};
+    RequestArgument deviceMemoryArg = {.location = {.poolIndex = 2}};
+
+    // This should fail, because the same device memory cannot be used for both input and output.
+    initializeDeviceMemory(buffer);
+    testExecution(preparedModel,
+                  {.inputs = {deviceMemoryArg, sharedMemoryArg1},
+                   .outputs = {deviceMemoryArg, sharedMemoryArg2},
+                   .pools = {sharedMemory1, sharedMemory2, deviceMemory}},
+                  ErrorStatus::INVALID_ARGUMENT);
+
+    // This should fail, because the same device memory cannot be used for multiple outputs.
+    testExecution(preparedModel,
+                  {.inputs = {sharedMemoryArg1, sharedMemoryArg2},
+                   .outputs = {deviceMemoryArg, deviceMemoryArg},
+                   .pools = {sharedMemory1, sharedMemory2, deviceMemory}},
+                  ErrorStatus::INVALID_ARGUMENT);
+
+    // The same device memory can be used for multiple inputs.
+    initializeDeviceMemory(buffer);
+    testExecution(preparedModel,
+                  {.inputs = {deviceMemoryArg, deviceMemoryArg},
+                   .outputs = {sharedMemoryArg1, sharedMemoryArg2},
+                   .pools = {sharedMemory1, sharedMemory2, deviceMemory}},
+                  ErrorStatus::NONE);
+}
+
+TEST_P(MemoryDomainExecutionTest, InvalidDimensions) {
+    // FENCED execution does not support dynamic shape.
+    if (kExecutor == Executor::FENCED) return;
+
+    TestOperand testOperand = kTestOperand;
+    testOperand.dimensions[0] = 0;
+    auto preparedModel = createConvPreparedModel(testOperand);
+    auto [buffer, token] = allocateBuffer(preparedModel, {0}, {0}, kTestOperand.dimensions);
+    if (buffer == nullptr) return;
+
+    Request::MemoryPool sharedMemory = createSharedMemoryPool(kTestOperandDataSize);
+    Request::MemoryPool deviceMemory = createDeviceMemoryPool(token);
+    auto badDimensions = kTestOperand.dimensions;
+    badDimensions[0] = 2;
+    RequestArgument sharedMemoryArg = {
+            .location = {.poolIndex = 0, .offset = 0, .length = kTestOperandDataSize},
+            .dimensions = badDimensions};
+    RequestArgument deviceMemoryArg = {.location = {.poolIndex = 1}};
+    RequestArgument deviceMemoryArgWithBadDimensions = {.location = {.poolIndex = 1},
+                                                        .dimensions = badDimensions};
+
+    initializeDeviceMemory(buffer);
+    testExecution(preparedModel,
+                  {.inputs = {deviceMemoryArgWithBadDimensions},
+                   .outputs = {sharedMemoryArg},
+                   .pools = {sharedMemory, deviceMemory}},
+                  ErrorStatus::INVALID_ARGUMENT);
+
+    testExecution(preparedModel,
+                  {.inputs = {sharedMemoryArg},
+                   .outputs = {deviceMemoryArgWithBadDimensions},
+                   .pools = {sharedMemory, deviceMemory}},
+                  ErrorStatus::INVALID_ARGUMENT);
+
+    testExecution(preparedModel,
+                  {.inputs = {sharedMemoryArg},
+                   .outputs = {deviceMemoryArg},
+                   .pools = {sharedMemory, deviceMemory}},
+                  ErrorStatus::GENERAL_FAILURE);
+}
+
+const auto kExecutorChoices = testing::Values(Executor::ASYNC, Executor::SYNC, Executor::FENCED);
+
+std::string printMemoryDomainExecutionTest(
+        const testing::TestParamInfo<MemoryDomainExecutionTestParam>& info) {
+    const auto& [namedDevice, operandType, executor] = info.param;
+    const std::string type = toString(static_cast<OperandType>(operandType));
+    const std::string executorStr = toString(executor);
+    return gtestCompliantName(getName(namedDevice) + "_" + type + "_" + executorStr);
+}
+
+INSTANTIATE_TEST_CASE_P(TestMemoryDomain, MemoryDomainExecutionTest,
+                        testing::Combine(kNamedDeviceChoices, kTestOperandTypeChoices,
+                                         kExecutorChoices),
+                        printMemoryDomainExecutionTest);
+
+}  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
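
Both suites above rely on GoogleTest value parameterization: testing::Combine expands the cross product of device, operand type and, for the execution suite, executor, and the custom printer turns each parameter tuple into a readable test name. A minimal self-contained sketch of that pattern, using made-up parameter types rather than the VTS ones, purely as an illustration:

    #include <gtest/gtest.h>
    #include <string>
    #include <tuple>

    using Param = std::tuple<std::string, int>;  // stand-ins for NamedDevice / TestOperandType

    class ComboTest : public testing::TestWithParam<Param> {};

    TEST_P(ComboTest, Runs) {
        const auto& [device, operand] = GetParam();
        EXPECT_FALSE(device.empty());
        EXPECT_GE(operand, 0);
    }

    // Builds a gtest-compliant name such as "someDevice_0" for each parameter tuple.
    std::string printCombo(const testing::TestParamInfo<Param>& info) {
        const auto& [device, operand] = info.param;
        return device + "_" + std::to_string(operand);
    }

    INSTANTIATE_TEST_CASE_P(Demo, ComboTest,
                            testing::Combine(testing::Values(std::string("someDevice")),
                                             testing::Values(0, 1)),
                            printCombo);
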
diff --git a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
index 879989e..2ef1e8f 100644
--- a/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
+++ b/neuralnetworks/1.3/vts/functional/QualityOfServiceTests.cpp
@@ -214,7 +214,8 @@
 }
 
 void runExecutionTest(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
-                      const Request& request, bool synchronous, DeadlineBoundType deadlineBound) {
+                      const Request& request, const ExecutionContext& context, bool synchronous,
+                      DeadlineBoundType deadlineBound) {
     const ExecutionFunction execute = synchronous ? executeSynchronously : executeAsynchronously;
     const auto deadline = makeDeadline(deadlineBound);
 
@@ -261,7 +262,7 @@
     // Retrieve execution results.
     ASSERT_TRUE(nn::compliantWithV1_0(request));
     const V1_0::Request request10 = nn::convertToV1_0(request);
-    const std::vector<TestBuffer> outputs = getOutputBuffers(request10);
+    const std::vector<TestBuffer> outputs = context.getOutputBuffers(request10);
 
     // We want "close-enough" results.
     if (status == ErrorStatus::NONE) {
@@ -270,10 +271,11 @@
 }
 
 void runExecutionTests(const sp<IPreparedModel>& preparedModel, const TestModel& testModel,
-                       const Request& request) {
+                       const Request& request, const ExecutionContext& context) {
     for (bool synchronous : {false, true}) {
         for (auto deadlineBound : deadlineBounds) {
-            runExecutionTest(preparedModel, testModel, request, synchronous, deadlineBound);
+            runExecutionTest(preparedModel, testModel, request, context, synchronous,
+                             deadlineBound);
         }
     }
 }
@@ -291,8 +293,9 @@
     if (preparedModel == nullptr) return;
 
     // run execution tests
-    const Request request = nn::convertToV1_3(createRequest(testModel));
-    runExecutionTests(preparedModel, testModel, request);
+    ExecutionContext context;
+    const Request request = nn::convertToV1_3(context.createRequest(testModel));
+    runExecutionTests(preparedModel, testModel, request, context);
 }
 
 class DeadlineTest : public GeneratedTestBase {};
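
The hunks above thread an ExecutionContext into runExecutionTest so that the object owning the request's backing memory is still alive when getOutputBuffers reads the results. A rough, illustrative-only sketch of that ownership pattern; FakeExecutionContext and FakeRequest are invented names, not the VTS API:

    #include <cstdint>
    #include <deque>
    #include <vector>

    struct FakeRequest {
        std::vector<const std::vector<uint8_t>*> outputPools;  // non-owning views
    };

    class FakeExecutionContext {
      public:
        FakeRequest createRequest(size_t outputSize) {
            mPools.emplace_back(outputSize);        // the context owns the buffer...
            return FakeRequest{{&mPools.back()}};   // ...the request merely points at it
        }
        std::vector<std::vector<uint8_t>> getOutputBuffers(const FakeRequest& request) const {
            std::vector<std::vector<uint8_t>> outputs;
            for (const auto* pool : request.outputPools) outputs.push_back(*pool);
            return outputs;
        }

      private:
        std::deque<std::vector<uint8_t>> mPools;  // deque keeps references stable on growth
    };

    int main() {
        FakeExecutionContext context;
        FakeRequest request = context.createRequest(/*outputSize=*/16);
        // ... an execution would fill the pool here ...
        return context.getOutputBuffers(request).size() == 1 ? 0 : 1;
    }
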
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
index 5b07034..df1e453 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.cpp
@@ -177,7 +177,8 @@
 
 TEST_P(ValidationTest, Test) {
     const Model model = createModel(kTestModel);
-    const Request request = nn::convertToV1_3(createRequest(kTestModel));
+    ExecutionContext context;
+    const Request request = nn::convertToV1_3(context.createRequest(kTestModel));
     if (kTestModel.expectFailure) {
         validateFailure(kDevice, model, request);
     } else {
@@ -185,11 +186,31 @@
     }
 }
 
-INSTANTIATE_GENERATED_TEST(ValidationTest, [](const test_helper::TestModel&) { return true; });
+INSTANTIATE_GENERATED_TEST(ValidationTest, [](const std::string& testName) {
+    // Skip validation for the "inputs_as_internal" and "all_tensors_as_inputs"
+    // generated tests.
+    return testName.find("inputs_as_internal") == std::string::npos &&
+           testName.find("all_tensors_as_inputs") == std::string::npos;
+});
 
 sp<IPreparedModel> getPreparedModel_1_3(const sp<PreparedModelCallback>& callback) {
     sp<V1_0::IPreparedModel> preparedModelV1_0 = callback->getPreparedModel();
     return IPreparedModel::castFrom(preparedModelV1_0).withDefault(nullptr);
 }
 
+std::string toString(Executor executor) {
+    switch (executor) {
+        case Executor::ASYNC:
+            return "ASYNC";
+        case Executor::SYNC:
+            return "SYNC";
+        case Executor::BURST:
+            return "BURST";
+        case Executor::FENCED:
+            return "FENCED";
+        default:
+            CHECK(false);
+    }
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
diff --git a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
index 4e51052..de082c3 100644
--- a/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
+++ b/neuralnetworks/1.3/vts/functional/VtsHalNeuralnetworks.h
@@ -52,6 +52,10 @@
 // Utility function to get PreparedModel from callback and downcast to V1_2.
 sp<IPreparedModel> getPreparedModel_1_3(const sp<implementation::PreparedModelCallback>& callback);
 
+enum class Executor { ASYNC, SYNC, BURST, FENCED };
+
+std::string toString(Executor executor);
+
 }  // namespace android::hardware::neuralnetworks::V1_3::vts::functional
 
 #endif  // ANDROID_HARDWARE_NEURALNETWORKS_V1_3_VTS_HAL_NEURALNETWORKS_H
diff --git a/radio/1.5/types.hal b/radio/1.5/types.hal
index 45f3b90..248f56e 100644
--- a/radio/1.5/types.hal
+++ b/radio/1.5/types.hal
@@ -1023,16 +1023,37 @@
  * 3GPP2 C.S0068-0.
  */
 enum PersoSubstate : @1.0::PersoSubstate {
+    /**
+     * The device is personalized using the content of the Service Provider Name (SPN) in the SIM
+     * card.
+     */
     SIM_SPN,
     SIM_SPN_PUK,
-    /** Equivalent Home PLMN */
+    /**
+     * Service Provider and Equivalent Home PLMN
+     * The device is personalized using both the content of the GID1 (equivalent to service provider
+     * personalization) and the content of the Equivalent Home PLMN (EHPLMN) in the SIM card.
+     * If the GID1 in the SIM is absent, then just the content of the Equivalent Home PLMN
+     * is matched.
+     */
     SIM_SP_EHPLMN,
     SIM_SP_EHPLMN_PUK,
+    /**
+     * Device is personalized using the first digits of the ICCID of the SIM card.
+     */
     SIM_ICCID,
     SIM_ICCID_PUK,
+    /**
+     * Device is personalized using the content of the IMPI in the ISIM.
+     */
     SIM_IMPI,
     SIM_IMPI_PUK,
-    /** Network subset service provider */
+    /**
+     * Network Subset and Service Provider
+     * Device is personalized using both the content of GID1 (equivalent to service provider
+     * personalization) and the first digits of the IMSI (equivalent to network subset
+     * personalization).
+     */
     SIM_NS_SP,
     SIM_NS_SP_PUK,
 };
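
The SIM_SP_EHPLMN comment above describes a two-key match that degrades to a single key when GID1 is absent. As a purely illustrative sketch of that rule (the struct and field names below are invented for the example, not part of the radio HAL):

    #include <optional>
    #include <string>

    struct SimIdentity {
        std::optional<std::string> gid1;  // may be absent on some SIM cards
        std::string ehplmn;
    };

    // True when the SIM satisfies the service-provider + EHPLMN personalization rule.
    bool matchesSpEhplmn(const SimIdentity& sim, const std::string& allowedGid1,
                         const std::string& allowedEhplmn) {
        if (!sim.gid1.has_value()) {
            // No GID1 on the SIM: only the Equivalent Home PLMN content is compared.
            return sim.ehplmn == allowedEhplmn;
        }
        // Otherwise both the service-provider (GID1) and EHPLMN contents must match.
        return *sim.gid1 == allowedGid1 && sim.ehplmn == allowedEhplmn;
    }
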
diff --git a/sensors/2.0/multihal/Android.bp b/sensors/2.0/multihal/Android.bp
index 7213b44..3ce3390 100644
--- a/sensors/2.0/multihal/Android.bp
+++ b/sensors/2.0/multihal/Android.bp
@@ -28,6 +28,7 @@
     shared_libs: [
         "android.hardware.sensors@2.0",
         "android.hardware.sensors@2.0-ScopedWakelock",
+        "android.hardware.sensors@2.1",
         "libbase",
         "libcutils",
         "libfmq",
diff --git a/sensors/common/default/2.X/multihal/Android.bp b/sensors/common/default/2.X/multihal/Android.bp
index 2b4b3bf..6122323 100644
--- a/sensors/common/default/2.X/multihal/Android.bp
+++ b/sensors/common/default/2.X/multihal/Android.bp
@@ -16,11 +16,12 @@
 cc_defaults {
     name: "android.hardware.sensors@2.X-multihal-defaults",
     header_libs: [
-        "android.hardware.sensors@2.0-multihal.header",
+        "android.hardware.sensors@2.X-multihal.header",
     ],
     shared_libs: [
         "android.hardware.sensors@1.0",
         "android.hardware.sensors@2.0",
+        "android.hardware.sensors@2.1",
         "libbase",
         "libcutils",
         "libfmq",
@@ -32,9 +33,24 @@
     cflags: ["-DLOG_TAG=\"SensorsMultiHal\""],
 }
 
+// Header target for sub-HALs that implement the Multi-HAL 2.0 interface
 cc_library_headers {
     name: "android.hardware.sensors@2.0-multihal.header",
     vendor_available: true,
+    export_include_dirs: ["include/V2_0"],
+}
+
+// Header target for sub-HALs that implement the Multi-HAL 2.1 interface
+cc_library_headers {
+    name: "android.hardware.sensors@2.1-multihal.header",
+    vendor_available: true,
+    export_include_dirs: ["include/V2_1"],
+}
+
+// Header target for Multi-HAL so it can reference both 2.0/2.1 headers
+cc_library_headers {
+    name: "android.hardware.sensors@2.X-multihal.header",
+    vendor_available: true,
     export_include_dirs: ["include"],
 }
 
@@ -49,7 +65,7 @@
     ],
     vendor_available: true,
     export_header_lib_headers: [
-        "android.hardware.sensors@2.0-multihal.header",
+        "android.hardware.sensors@2.X-multihal.header",
     ],
 }
 
@@ -62,6 +78,9 @@
     srcs: [
         "ScopedWakelock.cpp",
     ],
+    header_libs: [
+        "android.hardware.sensors@2.0-multihal.header",
+    ],
     vendor_available: true,
     export_header_lib_headers: [
         "android.hardware.sensors@2.0-multihal.header",
@@ -78,6 +97,9 @@
         "ScopedWakelock.cpp",
     ],
     vendor_available: true,
+    header_libs: [
+        "android.hardware.sensors@2.0-multihal.header",
+    ],
     export_header_lib_headers: [
         "android.hardware.sensors@2.0-multihal.header",
     ],
diff --git a/sensors/common/default/2.X/multihal/HalProxy.cpp b/sensors/common/default/2.X/multihal/HalProxy.cpp
index ac6f17a..518e138 100644
--- a/sensors/common/default/2.X/multihal/HalProxy.cpp
+++ b/sensors/common/default/2.X/multihal/HalProxy.cpp
@@ -16,8 +16,6 @@
 
 #include "HalProxy.h"
 
-#include "SubHal.h"
-
 #include <android/hardware/sensors/2.0/types.h>
 
 #include <android-base/file.h>
diff --git a/sensors/common/default/2.X/multihal/ScopedWakelock.cpp b/sensors/common/default/2.X/multihal/ScopedWakelock.cpp
index d85d4a7..bf2ad35 100644
--- a/sensors/common/default/2.X/multihal/ScopedWakelock.cpp
+++ b/sensors/common/default/2.X/multihal/ScopedWakelock.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "ScopedWakelock.h"
+#include "V2_0/ScopedWakelock.h"
 
 namespace android {
 namespace hardware {
diff --git a/sensors/common/default/2.X/multihal/include/HalProxy.h b/sensors/common/default/2.X/multihal/include/HalProxy.h
index 978f7cf..d7e8795 100644
--- a/sensors/common/default/2.X/multihal/include/HalProxy.h
+++ b/sensors/common/default/2.X/multihal/include/HalProxy.h
@@ -16,8 +16,9 @@
 
 #pragma once
 
-#include "ScopedWakelock.h"
-#include "SubHal.h"
+#include "V2_0/ScopedWakelock.h"
+#include "V2_0/SubHal.h"
+#include "V2_1/SubHal.h"
 
 #include <android/hardware/sensors/2.0/ISensors.h>
 #include <android/hardware/sensors/2.0/types.h>
diff --git a/sensors/common/default/2.X/multihal/include/ScopedWakelock.h b/sensors/common/default/2.X/multihal/include/V2_0/ScopedWakelock.h
similarity index 100%
rename from sensors/common/default/2.X/multihal/include/ScopedWakelock.h
rename to sensors/common/default/2.X/multihal/include/V2_0/ScopedWakelock.h
diff --git a/sensors/common/default/2.X/multihal/include/SubHal.h b/sensors/common/default/2.X/multihal/include/V2_0/SubHal.h
similarity index 99%
rename from sensors/common/default/2.X/multihal/include/SubHal.h
rename to sensors/common/default/2.X/multihal/include/V2_0/SubHal.h
index 92ae3a6..2a80e6b 100644
--- a/sensors/common/default/2.X/multihal/include/SubHal.h
+++ b/sensors/common/default/2.X/multihal/include/V2_0/SubHal.h
@@ -159,7 +159,7 @@
  * library. This function will only be invoked once at initialization time.
  *
  * NOTE: The supported sensors HAL version must match SUB_HAL_2_0_VERSION exactly or the HalProxy
- * will fail to initialize.
+ *     will fail to initialize.
  *
  * @param uint32_t when this function returns, this parameter must contain the HAL version that
  *     this sub-HAL supports. To support this version of multi-HAL, this must be set to
diff --git a/sensors/common/default/2.X/multihal/include/SubHal.h b/sensors/common/default/2.X/multihal/include/V2_1/SubHal.h
similarity index 78%
copy from sensors/common/default/2.X/multihal/include/SubHal.h
copy to sensors/common/default/2.X/multihal/include/V2_1/SubHal.h
index 92ae3a6..cd49422 100644
--- a/sensors/common/default/2.X/multihal/include/SubHal.h
+++ b/sensors/common/default/2.X/multihal/include/V2_1/SubHal.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2019 The Android Open Source Project
+ * Copyright (C) 2020 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,27 +16,23 @@
 
 #pragma once
 
-#include "ScopedWakelock.h"
+#include "V2_0/ScopedWakelock.h"
 
-#include <android/hardware/sensors/1.0/types.h>
-#include <android/hardware/sensors/2.0/ISensors.h>
+#include <android/hardware/sensors/2.1/ISensors.h>
+#include <android/hardware/sensors/2.1/types.h>
 
 #include <vector>
 
 // Indicates the current version of the multiHAL interface formatted as (HAL major version) << 24 |
 // (HAL minor version) << 16 | (multiHAL version)
-#define SUB_HAL_2_0_VERSION 0x02000000
+#define SUB_HAL_2_1_VERSION 0x02010000
 
 namespace android {
 namespace hardware {
 namespace sensors {
-namespace V2_0 {
+namespace V2_1 {
 namespace implementation {
 
-using ::android::hardware::sensors::V1_0::Event;
-using ::android::hardware::sensors::V1_0::Result;
-using ::android::hardware::sensors::V1_0::SensorInfo;
-
 /**
  * Interface that contains several callbacks into the HalProxy class to communicate dynamic sensor
  * changes and sensor events to the framework and acquire wake locks. The HalProxy will ensure
@@ -45,6 +41,8 @@
  */
 class IHalProxyCallback : public ISensorsCallback {
   public:
+    using ScopedWakelock = V2_0::implementation::ScopedWakelock;
+
     /**
      * Thread-safe callback used to post events to the HalProxy. Sub-HALs should invoke this
      * whenever new sensor events need to be delivered to the sensors framework. Once invoked, the
@@ -55,7 +53,7 @@
      *
      * The provided ScopedWakelock must be locked if the events are from wakeup sensors. If it's
      * not locked accordingly, the HalProxy will crash as this indicates the sub-HAL isn't compliant
-     * with the sensors HAL 2.0 specification. Additionally, since ScopedWakelock isn't copyable,
+     * with the sensors HAL specification. Additionally, since ScopedWakelock isn't copyable,
      * the HalProxy will take ownership of the wake lock given when this method is invoked. Once the
      * method returns, the HalProxy will handle holding the wake lock, if necessary, until the
      * framework has successfully processed any wakeup events.
@@ -71,7 +69,7 @@
      * @param wakelock ScopedWakelock that should be locked to send events from wake sensors and
      *     unlocked otherwise.
      */
-    virtual void postEvents(const std::vector<Event>& events, ScopedWakelock wakelock) = 0;
+    virtual void postEvents(const std::vector<V2_1::Event>& events, ScopedWakelock wakelock) = 0;
 
     /**
      * Initializes a ScopedWakelock on the stack that, when locked, will increment the reference
@@ -86,9 +84,9 @@
 
 /**
  * ISensorsSubHal is an interface that sub-HALs must implement in order to be compliant with
- * multihal 2.0 and in order for the HalProxy to successfully load and communicate with the sub-HAL.
+ * multihal and in order for the HalProxy to successfully load and communicate with the sub-HAL.
  *
- * Any vendor wishing to implement this interface and support multihal 2.0 will need to create a
+ * Any vendor wishing to implement this interface and support multihal will need to create a
  * dynamic library that exposes sensorsHalGetSubHal (defined below). This library will be loaded by
  * the HalProxy when the sensors HAL is initialized and then the HalProxy will retrieve the vendor's
  * implementation of sensorsHalGetSubHal.
@@ -103,11 +101,25 @@
   public:
     // The ISensors version of initialize isn't used for multihal. Instead, sub-HALs must implement
     // the version below to allow communication logic to be centralized in the HalProxy
-    Return<Result> initialize(
-            const ::android::hardware::MQDescriptorSync<Event>& /* eventQueueDescriptor */,
+    Return<V1_0::Result> initialize_2_1(
+            const ::android::hardware::MQDescriptorSync<V2_1::Event>& /* eventQueueDescriptor */,
             const ::android::hardware::MQDescriptorSync<uint32_t>& /* wakeLockDescriptor */,
             const sp<ISensorsCallback>& /* sensorsCallback */) final {
-        return Result::INVALID_OPERATION;
+        return V1_0::Result::INVALID_OPERATION;
+    }
+
+    Return<V1_0::Result> initialize(
+            const ::android::hardware::MQDescriptorSync<V1_0::Event>& /* eventQueueDescriptor */,
+            const ::android::hardware::MQDescriptorSync<uint32_t>& /* wakeLockDescriptor */,
+            const sp<V2_0::ISensorsCallback>& /* sensorsCallback */) final {
+        return V1_0::Result::INVALID_OPERATION;
+    }
+
+    // Several HAL 2.0 methods won't be invoked on HAL 2.1 so they are stubbed out below.
+    Return<void> getSensorsList(getSensorsList_cb /* _hidl_cb */) final { return Void(); }
+
+    Return<V1_0::Result> injectSensorData(const V1_0::Event& /* event */) final {
+        return V1_0::Result::INVALID_OPERATION;
     }
 
     /**
@@ -143,28 +155,26 @@
      *     should be created.
      * @return result OK on success
      */
-    virtual Return<Result> initialize(const sp<IHalProxyCallback>& halProxyCallback) = 0;
+    virtual Return<V1_0::Result> initialize(const sp<IHalProxyCallback>& halProxyCallback) = 0;
 };
 
 }  // namespace implementation
-}  // namespace V2_0
+}  // namespace V2_1
 }  // namespace sensors
 }  // namespace hardware
 }  // namespace android
 
-using ::android::hardware::sensors::V2_0::implementation::ISensorsSubHal;
-
 /**
  * Function that must be exported so the HalProxy class can invoke it on the sub-HAL dynamic
  * library. This function will only be invoked once at initialization time.
  *
- * NOTE: The supported sensors HAL version must match SUB_HAL_2_0_VERSION exactly or the HalProxy
- * will fail to initialize.
+ * NOTE: The supported sensors HAL version must match SUB_HAL_2_1_VERSION or the HalProxy
+ *     will fail to initialize.
  *
  * @param uint32_t when this function returns, this parameter must contain the HAL version that
- *     this sub-HAL supports. To support this version of multi-HAL, this must be set to
- *     SUB_HAL_2_0_VERSION.
+ *     this sub-HAL supports. This must be set to SUB_HAL_2_1_VERSION.
  * @return A statically allocated, valid ISensorsSubHal implementation.
  */
-__attribute__((visibility("default"))) extern "C" ISensorsSubHal* sensorsHalGetSubHal(
-        uint32_t* version);
+__attribute__((visibility(
+        "default"))) extern "C" ::android::hardware::sensors::V2_1::implementation::ISensorsSubHal*
+sensorsHalGetSubHal_2_1(uint32_t* version);
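
The header comment above defines the version layout as (HAL major version) << 24 | (HAL minor version) << 16 | (multi-HAL version). A tiny standalone check of that encoding, written here only as an illustration of how the two constants are derived:

    #include <cstdint>

    constexpr uint32_t makeSubHalVersion(uint32_t major, uint32_t minor, uint32_t multihal) {
        return (major << 24) | (minor << 16) | multihal;
    }

    static_assert(makeSubHalVersion(2, 0, 0) == 0x02000000, "matches SUB_HAL_2_0_VERSION");
    static_assert(makeSubHalVersion(2, 1, 0) == 0x02010000, "matches SUB_HAL_2_1_VERSION");

    int main() { return 0; }
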
diff --git a/sensors/common/default/2.X/multihal/tests/Android.bp b/sensors/common/default/2.X/multihal/tests/Android.bp
index afb63cc..7692b51 100644
--- a/sensors/common/default/2.X/multihal/tests/Android.bp
+++ b/sensors/common/default/2.X/multihal/tests/Android.bp
@@ -26,6 +26,7 @@
         "android.hardware.sensors@1.0",
         "android.hardware.sensors@2.0",
         "android.hardware.sensors@2.0-ScopedWakelock",
+        "android.hardware.sensors@2.1",
         "libcutils",
         "libfmq",
         "libhardware",
@@ -85,6 +86,8 @@
     shared_libs: [
         "android.hardware.sensors@1.0",
         "android.hardware.sensors@2.0",
+        "android.hardware.sensors@2.0-ScopedWakelock",
+        "android.hardware.sensors@2.1",
         "libbase",
         "libcutils",
         "libfmq",
diff --git a/sensors/common/default/2.X/multihal/tests/HalProxy_test.cpp b/sensors/common/default/2.X/multihal/tests/HalProxy_test.cpp
index 4633a75..867c4a1 100644
--- a/sensors/common/default/2.X/multihal/tests/HalProxy_test.cpp
+++ b/sensors/common/default/2.X/multihal/tests/HalProxy_test.cpp
@@ -19,8 +19,8 @@
 #include <fmq/MessageQueue.h>
 
 #include "HalProxy.h"
-#include "ScopedWakelock.h"
 #include "SensorsSubHal.h"
+#include "V2_0/ScopedWakelock.h"
 
 #include <chrono>
 #include <set>
diff --git a/sensors/common/default/2.X/multihal/tests/fake_subhal/SensorsSubHal.h b/sensors/common/default/2.X/multihal/tests/fake_subhal/SensorsSubHal.h
index c1e3647..6da4404 100644
--- a/sensors/common/default/2.X/multihal/tests/fake_subhal/SensorsSubHal.h
+++ b/sensors/common/default/2.X/multihal/tests/fake_subhal/SensorsSubHal.h
@@ -16,7 +16,7 @@
 
 #pragma once
 
-#include "SubHal.h"
+#include "V2_0/SubHal.h"
 
 #include "Sensor.h"