Add BLOB AHWB tests in VTS.

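Request memory in the generated and validation tests is now allocated
through an ExecutionContext, which keeps the input/output pools alive
for the duration of the test and can back them with either ashmem or a
BLOB-mode AHardwareBuffer. A rough usage sketch (createRequest defaults
to MemoryType::ASHMEM; pass MemoryType::BLOB_AHWB to exercise the new
path):

  ExecutionContext context;
  const Request request =
          context.createRequest(testModel, MemoryType::BLOB_AHWB);
  // ... prepare the model and run the execution ...
  const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
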
Bug: 149847930
Test: 1.3 VTS
Change-Id: I9c795dcb7696c843afd12551927463c5529a4b60
diff --git a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
index e28605d..4ab228f 100644
--- a/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/1.0/vts/functional/GeneratedTestHarness.cpp
@@ -125,7 +125,9 @@
 // Test driver for those generated from ml/nn/runtime/test/spec
 void Execute(const sp<IDevice>& device, const TestModel& testModel) {
     const Model model = createModel(testModel);
-    const Request request = createRequest(testModel);
+
+    ExecutionContext context;
+    const Request request = context.createRequest(testModel);
 
     // Create IPreparedModel.
     sp<IPreparedModel> preparedModel;
@@ -143,7 +145,7 @@
     ASSERT_EQ(ErrorStatus::NONE, executionCallback->getStatus());
 
     // Retrieve execution results.
-    const std::vector<TestBuffer> outputs = getOutputBuffers(request);
+    const std::vector<TestBuffer> outputs = context.getOutputBuffers(request);
 
     // We want "close-enough" results.
     checkResults(testModel, outputs);
diff --git a/neuralnetworks/1.0/vts/functional/Utils.cpp b/neuralnetworks/1.0/vts/functional/Utils.cpp
index 0dba85a..3613e69 100644
--- a/neuralnetworks/1.0/vts/functional/Utils.cpp
+++ b/neuralnetworks/1.0/vts/functional/Utils.cpp
@@ -21,10 +21,13 @@
 
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware_buffer.h>
 #include <android/hidl/allocator/1.0/IAllocator.h>
 #include <android/hidl/memory/1.0/IMemory.h>
 #include <hidlmemory/mapping.h>
+#include <vndk/hardware_buffer.h>
 
+#include <gtest/gtest.h>
 #include <algorithm>
 #include <iostream>
 #include <vector>
@@ -37,10 +40,64 @@
 using V1_0::Request;
 using V1_0::RequestArgument;
 
-constexpr uint32_t kInputPoolIndex = 0;
-constexpr uint32_t kOutputPoolIndex = 1;
+std::unique_ptr<TestAshmem> TestAshmem::create(uint32_t size) {
+    auto ashmem = std::make_unique<TestAshmem>(size);
+    return ashmem->mIsValid ? std::move(ashmem) : nullptr;
+}
 
-Request createRequest(const TestModel& testModel) {
+void TestAshmem::initialize(uint32_t size) {
+    mIsValid = false;
+    ASSERT_GT(size, 0);
+    mHidlMemory = nn::allocateSharedMemory(size);
+    ASSERT_TRUE(mHidlMemory.valid());
+    mMappedMemory = mapMemory(mHidlMemory);
+    ASSERT_NE(mMappedMemory, nullptr);
+    mPtr = static_cast<uint8_t*>(static_cast<void*>(mMappedMemory->getPointer()));
+    ASSERT_NE(mPtr, nullptr);
+    mIsValid = true;
+}
+
+std::unique_ptr<TestBlobAHWB> TestBlobAHWB::create(uint32_t size) {
+    auto ahwb = std::make_unique<TestBlobAHWB>(size);
+    return ahwb->mIsValid ? std::move(ahwb) : nullptr;
+}
+
+void TestBlobAHWB::initialize(uint32_t size) {
+    mIsValid = false;
+    ASSERT_GT(size, 0);
+    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+    const AHardwareBuffer_Desc desc = {
+            .width = size,
+            .height = 1,
+            .layers = 1,
+            .format = AHARDWAREBUFFER_FORMAT_BLOB,
+            .usage = usage,
+            .stride = size,
+    };
+    ASSERT_EQ(AHardwareBuffer_allocate(&desc, &mAhwb), 0);
+    ASSERT_NE(mAhwb, nullptr);
+
+    void* buffer = nullptr;
+    ASSERT_EQ(AHardwareBuffer_lock(mAhwb, usage, -1, nullptr, &buffer), 0);
+    ASSERT_NE(buffer, nullptr);
+    mPtr = static_cast<uint8_t*>(buffer);
+
+    const native_handle_t* handle = AHardwareBuffer_getNativeHandle(mAhwb);
+    ASSERT_NE(handle, nullptr);
+    mHidlMemory = hidl_memory("hardware_buffer_blob", handle, desc.width);
+    mIsValid = true;
+}
+
+TestBlobAHWB::~TestBlobAHWB() {
+    if (mAhwb) {
+        AHardwareBuffer_unlock(mAhwb, nullptr);
+        AHardwareBuffer_release(mAhwb);
+    }
+}
+
+Request ExecutionContext::createRequest(const TestModel& testModel, MemoryType memoryType) {
+    CHECK(memoryType == MemoryType::ASHMEM || memoryType == MemoryType::BLOB_AHWB);
+
     // Model inputs.
     hidl_vec<RequestArgument> inputs(testModel.main.inputIndexes.size());
     size_t inputSize = 0;
@@ -80,16 +137,19 @@
     }
 
     // Allocate memory pools.
-    hidl_vec<hidl_memory> pools = {nn::allocateSharedMemory(inputSize),
-                                   nn::allocateSharedMemory(outputSize)};
-    CHECK_NE(pools[kInputPoolIndex].size(), 0u);
-    CHECK_NE(pools[kOutputPoolIndex].size(), 0u);
-    sp<IMemory> inputMemory = mapMemory(pools[kInputPoolIndex]);
-    CHECK(inputMemory.get() != nullptr);
-    uint8_t* inputPtr = static_cast<uint8_t*>(static_cast<void*>(inputMemory->getPointer()));
-    CHECK(inputPtr != nullptr);
+    if (memoryType == MemoryType::ASHMEM) {
+        mInputMemory = TestAshmem::create(inputSize);
+        mOutputMemory = TestAshmem::create(outputSize);
+    } else {
+        mInputMemory = TestBlobAHWB::create(inputSize);
+        mOutputMemory = TestBlobAHWB::create(outputSize);
+    }
+    EXPECT_NE(mInputMemory, nullptr);
+    EXPECT_NE(mOutputMemory, nullptr);
+    hidl_vec<hidl_memory> pools = {mInputMemory->getHidlMemory(), mOutputMemory->getHidlMemory()};
 
     // Copy input data to the memory pool.
+    uint8_t* inputPtr = mInputMemory->getPointer();
     for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
         const auto& op = testModel.main.operands[testModel.main.inputIndexes[i]];
         if (op.data.size() > 0) {
@@ -102,18 +162,13 @@
     return {.inputs = std::move(inputs), .outputs = std::move(outputs), .pools = std::move(pools)};
 }
 
-std::vector<TestBuffer> getOutputBuffers(const Request& request) {
-    sp<IMemory> outputMemory = mapMemory(request.pools[kOutputPoolIndex]);
-    CHECK(outputMemory.get() != nullptr);
-    uint8_t* outputPtr = static_cast<uint8_t*>(static_cast<void*>(outputMemory->getPointer()));
-    CHECK(outputPtr != nullptr);
-
+std::vector<TestBuffer> ExecutionContext::getOutputBuffers(const Request& request) const {
     // Copy out output results.
+    uint8_t* outputPtr = mOutputMemory->getPointer();
     std::vector<TestBuffer> outputBuffers;
     for (const auto& output : request.outputs) {
         outputBuffers.emplace_back(output.location.length, outputPtr + output.location.offset);
     }
-
     return outputBuffers;
 }
 
diff --git a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
index cb22250..7f7dac0 100644
--- a/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
+++ b/neuralnetworks/1.0/vts/functional/VtsHalNeuralnetworks.cpp
@@ -129,7 +129,8 @@
 
 TEST_P(ValidationTest, Test) {
     const Model model = createModel(kTestModel);
-    const Request request = createRequest(kTestModel);
+    ExecutionContext context;
+    const Request request = context.createRequest(kTestModel);
     ASSERT_FALSE(kTestModel.expectFailure);
     validateEverything(kDevice, model, request);
 }
diff --git a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
index 6d4534c..3292f79 100644
--- a/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
+++ b/neuralnetworks/1.0/vts/functional/include/1.0/Utils.h
@@ -19,6 +19,8 @@
 
 #include <android-base/logging.h>
 #include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware_buffer.h>
+#include <android/hidl/memory/1.0/IMemory.h>
 #include <algorithm>
 #include <iosfwd>
 #include <string>
@@ -28,11 +30,73 @@
 
 namespace android::hardware::neuralnetworks {
 
-// Create HIDL Request from the TestModel struct.
-V1_0::Request createRequest(const test_helper::TestModel& testModel);
+// Convenience class to manage the lifetime of memory resources.
+class TestMemoryBase {
+    DISALLOW_COPY_AND_ASSIGN(TestMemoryBase);
 
-// After execution, copy out output results from the output memory pool.
-std::vector<::test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request);
+  public:
+    TestMemoryBase() = default;
+    virtual ~TestMemoryBase() = default;
+    uint8_t* getPointer() const { return mPtr; }
+    hidl_memory getHidlMemory() const { return mHidlMemory; }
+
+  protected:
+    uint8_t* mPtr = nullptr;
+    hidl_memory mHidlMemory;
+    bool mIsValid = false;
+};
+
+class TestAshmem : public TestMemoryBase {
+  public:
+    static std::unique_ptr<TestAshmem> create(uint32_t size);
+
+    // Prefer TestAshmem::create.
+    // The constructor calls initialize, which constructs the memory resources. This is a
+    // workaround for the fact that gtest macros cannot be used directly in a constructor.
+    TestAshmem(uint32_t size) { initialize(size); }
+
+  private:
+    void initialize(uint32_t size);
+    sp<hidl::memory::V1_0::IMemory> mMappedMemory;
+};
+
+class TestBlobAHWB : public TestMemoryBase {
+  public:
+    static std::unique_ptr<TestBlobAHWB> create(uint32_t size);
+
+    // Prefer TestBlobAHWB::create.
+    // The constructor calls initialize, which constructs the memory resources. This is a
+    // workaround for the fact that gtest macros cannot be used directly in a constructor.
+    TestBlobAHWB(uint32_t size) { initialize(size); }
+    ~TestBlobAHWB();
+
+  private:
+    void initialize(uint32_t size);
+    AHardwareBuffer* mAhwb = nullptr;
+};
+
+enum class MemoryType { ASHMEM, BLOB_AHWB, DEVICE };
+
+// Manages the lifetime of memory resources used in an execution.
+class ExecutionContext {
+    DISALLOW_COPY_AND_ASSIGN(ExecutionContext);
+
+  public:
+    static constexpr uint32_t kInputPoolIndex = 0;
+    static constexpr uint32_t kOutputPoolIndex = 1;
+
+    ExecutionContext() = default;
+
+    // Create HIDL Request from the TestModel struct.
+    V1_0::Request createRequest(const test_helper::TestModel& testModel,
+                                MemoryType memoryType = MemoryType::ASHMEM);
+
+    // After execution, copy out output results from the output memory pool.
+    std::vector<test_helper::TestBuffer> getOutputBuffers(const V1_0::Request& request) const;
+
+  private:
+    std::unique_ptr<TestMemoryBase> mInputMemory, mOutputMemory;
+};
 
 // Delete element from hidl_vec. hidl_vec doesn't support a "remove" operation,
 // so this is efficiently accomplished by moving the element to the end and