Merge "KeyMint HAL: cert dates are in milliseconds"
diff --git a/current.txt b/current.txt
index 0b6f698..cd1446d 100644
--- a/current.txt
+++ b/current.txt
@@ -780,6 +780,8 @@
550619f876cadbea1f718edce120f0e1dd4a6f4bd4c28b59d479677dc86b0aec android.hardware.neuralnetworks@1.3::types
c3fec5bd470984402997f78a74b6511efc4063b270f2bd9ee7b78f48b683a1bb android.hardware.neuralnetworks@1.3::IDevice
0fdfad62c2ec33b52e6687004e5a1971c02d10b93ee4d26df5ccff7ce032494a android.hardware.neuralnetworks@1.3::IPreparedModel
+b40c13f9a9affc806c778c1f8c78e90d4acb50f1d6a6be185d933d7a04b91c5b android.hardware.sensors@1.0::ISensors
+432086950205f5876da85dbd42004b0d0d05b429b9494b4f76a4d888758c5bd8 android.hardware.sensors@1.0::types
e8c86c69c438da8d1549856c1bb3e2d1b8da52722f8235ff49a30f2cce91742c android.hardware.soundtrigger@2.1::ISoundTriggerHwCallback
b9fbb6e2e061ed0960939d48b785e9700210add1f13ed32ecd688d0f1ca20ef7 android.hardware.renderscript@1.0::types
0f53d70e1eadf8d987766db4bf6ae2048004682168f4cab118da576787def3fa android.hardware.radio@1.0::types
@@ -831,7 +833,7 @@
c9ad18729268593d14681d88ffad1c97e707444a45e1b4ed804dab949edbd84f android.hardware.radio.config@1.3::IRadioConfigResponse
78dcb9a6975e8b377cb90bbe952078162960941468c992dcd2e1830a477b8c03 android.hardware.radio.config@1.3::types
fd43298c43f70130c747a642ee43b0c242ac0cebffb377faa24f2725f0aa6caf android.hardware.tetheroffload.control@1.1::IOffloadControl
-fe18c9032e4063efca3fff3c377dd69780de1f96e8e2bc3f7d100a5d8bd467b4 android.hardware.tetheroffload.control@1.1::ITetheringOffloadCallback
+ead4ec8713a2cb40906fe31ba793d21a6b1190143c446690d16a6ea686aa2fea android.hardware.tetheroffload.control@1.1::ITetheringOffloadCallback
e34b4c7bec5e032c14804707ca924dd6b99ed5ba139da7505fe7d698d0fe178f android.hardware.tetheroffload.control@1.1::types
# There should be no more HIDL HALs - please use AIDL instead.
diff --git a/identity/aidl/default/libeic/EicCbor.c b/identity/aidl/default/libeic/EicCbor.c
index fe131eb..0e2684f 100644
--- a/identity/aidl/default/libeic/EicCbor.c
+++ b/identity/aidl/default/libeic/EicCbor.c
@@ -114,7 +114,7 @@
data[4] = size & 0xff;
eicCborAppend(cbor, data, 5);
} else {
- data[0] = (majorType << 5) | 24;
+ data[0] = (majorType << 5) | 27;
data[1] = (((uint64_t)size) >> 56) & 0xff;
data[2] = (((uint64_t)size) >> 48) & 0xff;
data[3] = (((uint64_t)size) >> 40) & 0xff;
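
Note: the one-byte change above is the entire fix. In CBOR (RFC 8949, section 3), the low five bits of the initial byte select how the length is encoded: additional-information value 24 means a uint8 length follows, 25 a uint16, 26 a uint32, and 27 a uint64. The old code tagged the eight-byte branch with 24, so any item whose length needed more than four bytes was emitted as malformed CBOR. For reference, a standalone sketch of the head encoding (the helper name is ours, not libeic's):

    #include <cstddef>
    #include <cstdint>

    // Illustrative only; libeic's real entry points are the eicCborAppend* functions.
    // Encodes a CBOR head (major type + length) per RFC 8949 and returns its size.
    static size_t cborEncodeHead(uint8_t majorType, uint64_t value, uint8_t out[9]) {
        if (value < 24) {  // value fits directly in the initial byte
            out[0] = (majorType << 5) | (uint8_t)value;
            return 1;
        }
        int lengthBytes;
        uint8_t additionalInfo;
        if (value <= UINT8_MAX) {  // additional info 24: one length byte follows
            lengthBytes = 1;
            additionalInfo = 24;
        } else if (value <= UINT16_MAX) {  // additional info 25: two bytes
            lengthBytes = 2;
            additionalInfo = 25;
        } else if (value <= UINT32_MAX) {  // additional info 26: four bytes
            lengthBytes = 4;
            additionalInfo = 26;
        } else {  // additional info 27: eight bytes (the case fixed above)
            lengthBytes = 8;
            additionalInfo = 27;
        }
        out[0] = (majorType << 5) | additionalInfo;
        for (int i = 0; i < lengthBytes; i++) {
            out[1 + i] = (value >> (8 * (lengthBytes - 1 - i))) & 0xff;
        }
        return 1 + (size_t)lengthBytes;
    }
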
diff --git a/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp b/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp
index e46cb48..0639da8 100644
--- a/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp
+++ b/keymaster/4.1/vts/functional/DeviceUniqueAttestationTest.cpp
@@ -243,7 +243,9 @@
EXPECT_EQ(ErrorCode::OK, result);
EXPECT_EQ(2U, cert_chain.size());
- if (dumpAttestations) dumpContent(bin2hex(cert_chain[0]));
+ if (dumpAttestations) {
+ for (const auto& cert_ : cert_chain) dumpContent(bin2hex(cert_));
+ }
auto [err, attestation] = parse_attestation_record(cert_chain[0]);
ASSERT_EQ(ErrorCode::OK, err);
@@ -287,7 +289,9 @@
EXPECT_EQ(ErrorCode::OK, result);
EXPECT_EQ(2U, cert_chain.size());
- if (dumpAttestations) dumpContent(bin2hex(cert_chain[0]));
+ if (dumpAttestations) {
+ for (const auto& cert_ : cert_chain) dumpContent(bin2hex(cert_));
+ }
auto [err, attestation] = parse_attestation_record(cert_chain[0]);
ASSERT_EQ(ErrorCode::OK, err);
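
Note: this broadens the debug output from the leaf certificate to the full two-element attestation chain. The hex dumper it feeds each certificate through lives in the keymaster VTS support code; as an assumption about its shape (the real bin2hex operates on the hidl_vec<uint8_t> chain entries, not std::vector):

    #include <cstdint>
    #include <string>
    #include <vector>

    // Assumed shape of the bin2hex helper used above; illustrative only.
    std::string bin2hex(const std::vector<uint8_t>& data) {
        static const char kHex[] = "0123456789abcdef";
        std::string out;
        out.reserve(data.size() * 2);
        for (uint8_t byte : data) {
            out.push_back(kHex[byte >> 4]);
            out.push_back(kHex[byte & 0x0f]);
        }
        return out;
    }
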
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
index 7849ca7..8bd2fbe 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Burst.h
@@ -48,6 +48,10 @@
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
private:
const nn::SharedPreparedModel kPreparedModel;
};
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h
new file mode 100644
index 0000000..e201e25
--- /dev/null
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H
+
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include "PreparedModel.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation);
+
+ Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
+ Request request, hal::utils::RequestRelocation relocation);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const PreparedModel> kPreparedModel;
+ const Request kRequest;
+ const hal::utils::RequestRelocation kRelocation;
+};
+
+} // namespace android::hardware::neuralnetworks::V1_0::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_EXECUTION_H
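
Note: the new header follows the factory-plus-private-tag pattern used throughout these utils. create() validates its arguments and can return an error through nn::GeneralResult, which a constructor cannot do, while PrivateConstructorTag keeps the constructor public (so std::make_shared can call it) yet unreachable from code that cannot name the nested tag type. A minimal sketch of the idiom with illustrative names, outside the NNAPI types:

    #include <memory>
    #include <string>
    #include <utility>

    class Widget {
        struct PrivateConstructorTag {};  // only members and friends can name this

      public:
        // The factory can reject bad input; make_shared can still reach the ctor.
        static std::shared_ptr<const Widget> create(std::string name) {
            if (name.empty()) return nullptr;  // real code: return NN_ERROR() << ...
            return std::make_shared<const Widget>(PrivateConstructorTag{}, std::move(name));
        }

        Widget(PrivateConstructorTag /*tag*/, std::string name) : kName(std::move(name)) {}

      private:
        const std::string kName;
    };
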
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
index 8853eea..48be595 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
@@ -57,10 +57,17 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const V1_0::Request& request, const hal::utils::RequestRelocation& relocation) const;
+
private:
const sp<V1_0::IPreparedModel> kPreparedModel;
const hal::utils::DeathHandler kDeathHandler;
diff --git a/neuralnetworks/1.0/utils/src/Burst.cpp b/neuralnetworks/1.0/utils/src/Burst.cpp
index e3a9757..1284721 100644
--- a/neuralnetworks/1.0/utils/src/Burst.cpp
+++ b/neuralnetworks/1.0/utils/src/Burst.cpp
@@ -55,4 +55,10 @@
return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
}
+nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+}
+
} // namespace android::hardware::neuralnetworks::V1_0::utils
diff --git a/neuralnetworks/1.0/utils/src/Execution.cpp b/neuralnetworks/1.0/utils/src/Execution.cpp
new file mode 100644
index 0000000..7a3216b
--- /dev/null
+++ b/neuralnetworks/1.0/utils/src/Execution.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Execution.h"
+
+#include "Callbacks.h"
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_0::utils {
+
+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation) {
+ if (preparedModel == nullptr) {
+ return NN_ERROR() << "V1_0::utils::Execution::create must have non-null preparedModel";
+ }
+
+ return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
+ std::move(request), std::move(relocation));
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/,
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation)
+ : kPreparedModel(std::move(preparedModel)),
+ kRequest(std::move(request)),
+ kRelocation(std::move(relocation)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+ const nn::OptionalTimePoint& /*deadline*/) const {
+ return kPreparedModel->executeInternal(kRequest, kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+ const std::vector<nn::SyncFence>& /*waitFor*/, const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+ << "IExecution::computeFenced is not supported on 1.0 HAL service";
+}
+
+} // namespace android::hardware::neuralnetworks::V1_0::utils
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 858571d..00970c0 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -19,6 +19,7 @@
#include "Burst.h"
#include "Callbacks.h"
#include "Conversions.h"
+#include "Execution.h"
#include "Utils.h"
#include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
@@ -61,22 +62,35 @@
const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation)));
const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
+ return executeInternal(hidlRequest, relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeInternal(const V1_0::Request& request,
+ const hal::utils::RequestRelocation& relocation) const {
+ if (relocation.input) {
+ relocation.input->flush();
+ }
+
const auto cb = sp<ExecutionCallback>::make();
const auto scoped = kDeathHandler.protectCallback(cb.get());
- const auto ret = kPreparedModel->execute(hidlRequest, cb);
+ const auto ret = kPreparedModel->execute(request, cb);
const auto status = HANDLE_TRANSPORT_FAILURE(ret);
HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
auto result = NN_TRY(cb->get());
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return result;
}
@@ -91,6 +105,20 @@
<< "IPreparedModel::executeFenced is not supported on 1.0 HAL service";
}
+nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming /*measure*/,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ auto hidlRequest = NN_TRY(convert(requestInShared));
+ return Execution::create(shared_from_this(), std::move(hidlRequest), std::move(relocation));
+}
+
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
return Burst::create(shared_from_this());
}
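
Note: with execute() now funneling through executeInternal(), createReusableExecution() pays the pointer-to-shared-memory conversion and the HIDL request conversion once; each later compute() only flushes the relocation buffers and performs the IPC. The intended call pattern, sketched against the new tests below (not a standalone program):

    // Shape of the call sequence exercised by the reusableExecute test below.
    const auto execution = preparedModel->createReusableExecution(
            request, /*measure=*/{}, /*loopTimeoutDuration=*/{}).value();
    for (uint32_t i = 0; i < kNumberOfComputations; ++i) {
        // Conversion and relocation setup are not repeated per iteration.
        const auto [outputShapes, timing] = execution->compute(/*deadline=*/{}).value();
    }
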
diff --git a/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
index f19ed77..7820c06 100644
--- a/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.0/utils/test/PreparedModelTest.cpp
@@ -19,6 +19,7 @@
#include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -224,6 +225,150 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
+TEST(PreparedModelTest, reusableExecute) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, execute(_, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(Invoke(makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteLaunchError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, execute(_, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecute(V1_0::ErrorStatus::GENERAL_FAILURE,
+ V1_0::ErrorStatus::GENERAL_FAILURE)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteReturnError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, execute(_, _))
+ .Times(1)
+ .WillOnce(Invoke(
+ makeExecute(V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, execute(_, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, execute(_, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteCrash) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto ret = [&mockPreparedModel]() -> hardware::Return<V1_0::ErrorStatus> {
+ mockPreparedModel->simulateCrash();
+ return V1_0::ErrorStatus::NONE;
+ };
+ EXPECT_CALL(*mockPreparedModel, execute(_, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedNotSupported) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
TEST(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h
new file mode 100644
index 0000000..9c66446
--- /dev/null
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_H
+
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/ProtectCallback.h>
+
+#include "PreparedModel.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
+class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ std::shared_ptr<const PreparedModel> preparedModel, V1_0::Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure);
+
+ Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
+ V1_0::Request request, hal::utils::RequestRelocation relocation,
+ V1_2::MeasureTiming measure);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const PreparedModel> kPreparedModel;
+ const V1_0::Request kRequest;
+ const hal::utils::RequestRelocation kRelocation;
+ const MeasureTiming kMeasure;
+};
+
+} // namespace android::hardware::neuralnetworks::V1_2::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_2_UTILS_EXECUTION_H
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
index 9669d8c0..dae1ff3 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
@@ -28,9 +28,11 @@
#include <fmq/MessageQueue.h>
#include <hidl/MQDescriptor.h>
#include <nnapi/IBurst.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/ProtectCallback.h>
#include <atomic>
@@ -51,14 +53,14 @@
* across FMQ, making it appear to the runtime as a regular synchronous inference. Additionally,
* this class manages the burst's memory cache.
*/
-class ExecutionBurstController final : public nn::IBurst {
+class ExecutionBurstController final
+ : public nn::IBurst,
+ public std::enable_shared_from_this<ExecutionBurstController> {
struct PrivateConstructorTag {};
public:
- using FallbackFunction =
- std::function<nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>(
- const nn::Request&, nn::MeasureTiming, const nn::OptionalTimePoint&,
- const nn::OptionalDuration&)>;
+ using FallbackFunction = std::function<
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>()>;
/**
* NN runtime memory cache.
@@ -154,10 +156,10 @@
* @return ExecutionBurstController Execution burst controller object.
*/
static nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> create(
- const sp<IPreparedModel>& preparedModel, FallbackFunction fallback,
+ nn::SharedPreparedModel preparedModel, const sp<IPreparedModel>& hidlPreparedModel,
std::chrono::microseconds pollingTimeWindow);
- ExecutionBurstController(PrivateConstructorTag tag, FallbackFunction fallback,
+ ExecutionBurstController(PrivateConstructorTag tag, nn::SharedPreparedModel preparedModel,
std::unique_ptr<RequestChannelSender> requestChannelSender,
std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
@@ -173,9 +175,21 @@
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const override;
+ // See IBurst::createReusableExecution for information on this method.
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
+ // If fallback is not nullptr, this method invokes it to try another execution path when the
+ // request packet cannot be sent. Otherwise, a failed send results in an error.
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const std::vector<FmqRequestDatum>& requestPacket,
+ const hal::utils::RequestRelocation& relocation, FallbackFunction fallback) const;
+
private:
mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
- const FallbackFunction kFallback;
+ const nn::SharedPreparedModel kPreparedModel;
const std::unique_ptr<RequestChannelSender> mRequestChannelSender;
const std::unique_ptr<ResultChannelReceiver> mResultChannelReceiver;
const sp<ExecutionBurstCallback> mBurstCallback;
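
Note: narrowing FallbackFunction from four parameters to a nullary callable moves argument binding to the call site: execute() captures its request, measure, deadline, and loop timeout in a lambda, and the reusable-execution path passes nullptr since it has no higher-level path to retry. A sketch of the resulting control flow, with a stand-in result type in place of nn::ExecutionResult:

    #include <functional>
    #include <optional>
    #include <string>

    using Result = std::optional<std::string>;  // stand-in for nn::ExecutionResult<...>
    using FallbackFunction = std::function<Result()>;

    Result executeInternal(bool packetSent, const FallbackFunction& fallback) {
        if (!packetSent) {
            if (fallback) return fallback();  // retry via the captured execution path
            return std::nullopt;              // no fallback: surface the send failure
        }
        return "result read back over FMQ";
    }
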
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
index fb11130..35abd79 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -58,10 +58,18 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const V1_0::Request& request, MeasureTiming measure,
+ const hal::utils::RequestRelocation& relocation) const;
+
private:
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
const V1_0::Request& request, MeasureTiming measure) const;
diff --git a/neuralnetworks/1.2/utils/src/Execution.cpp b/neuralnetworks/1.2/utils/src/Execution.cpp
new file mode 100644
index 0000000..18d1c90
--- /dev/null
+++ b/neuralnetworks/1.2/utils/src/Execution.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Execution.h"
+
+#include "Callbacks.h"
+#include "Conversions.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_2::utils {
+
+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+ std::shared_ptr<const PreparedModel> preparedModel, V1_0::Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure) {
+ if (preparedModel == nullptr) {
+ return NN_ERROR() << "V1_2::utils::Execution::create must have non-null preparedModel";
+ }
+
+ return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
+ std::move(request), std::move(relocation), measure);
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/,
+ std::shared_ptr<const PreparedModel> preparedModel, V1_0::Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure)
+ : kPreparedModel(std::move(preparedModel)),
+ kRequest(std::move(request)),
+ kRelocation(std::move(relocation)),
+ kMeasure(measure) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+ const nn::OptionalTimePoint& /*deadline*/) const {
+ return kPreparedModel->executeInternal(kRequest, kMeasure, kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+ const std::vector<nn::SyncFence>& /*waitFor*/, const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+ << "IExecution::computeFenced is not supported on 1.2 HAL service";
+}
+
+} // namespace android::hardware::neuralnetworks::V1_2::utils
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
index 7a17f25..b4b6f68 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
@@ -28,6 +28,7 @@
#include <nnapi/Types.h>
#include <nnapi/Validation.h>
#include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/CommonUtils.h>
#include <nnapi/hal/HandleError.h>
#include <nnapi/hal/ProtectCallback.h>
#include <nnapi/hal/TransferValue.h>
@@ -50,6 +51,35 @@
namespace android::hardware::neuralnetworks::V1_2::utils {
namespace {
+class BurstExecution final : public nn::IExecution,
+ public std::enable_shared_from_this<BurstExecution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
+ std::shared_ptr<const ExecutionBurstController> controller,
+ std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
+ std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
+
+ BurstExecution(PrivateConstructorTag tag,
+ std::shared_ptr<const ExecutionBurstController> controller,
+ std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
+ std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const ExecutionBurstController> kController;
+ const std::vector<FmqRequestDatum> kRequest;
+ const hal::utils::RequestRelocation kRelocation;
+ const std::vector<ExecutionBurstController::OptionalCacheHold> kCacheHolds;
+};
+
nn::GeneralResult<sp<IBurstContext>> executionBurstResultCallback(
V1_0::ErrorStatus status, const sp<IBurstContext>& burstContext) {
HANDLE_HAL_STATUS(status) << "IPreparedModel::configureExecutionBurst failed with status "
@@ -209,10 +239,10 @@
// ExecutionBurstController methods
nn::GeneralResult<std::shared_ptr<const ExecutionBurstController>> ExecutionBurstController::create(
- const sp<V1_2::IPreparedModel>& preparedModel, FallbackFunction fallback,
+ nn::SharedPreparedModel preparedModel, const sp<V1_2::IPreparedModel>& hidlPreparedModel,
std::chrono::microseconds pollingTimeWindow) {
// check inputs
- if (preparedModel == nullptr) {
+ if (preparedModel == nullptr || hidlPreparedModel == nullptr) {
return NN_ERROR() << "ExecutionBurstController::create passed a nullptr";
}
@@ -236,7 +266,7 @@
auto cb = hal::utils::CallbackValue(executionBurstResultCallback);
// configure burst
- const Return<void> ret = preparedModel->configureExecutionBurst(
+ const Return<void> ret = hidlPreparedModel->configureExecutionBurst(
burstCallback, *requestChannelDescriptor, *resultChannelDescriptor, cb);
HANDLE_TRANSPORT_FAILURE(ret);
@@ -250,18 +280,18 @@
// make and return controller
return std::make_shared<const ExecutionBurstController>(
- PrivateConstructorTag{}, std::move(fallback), std::move(requestChannelSender),
+ PrivateConstructorTag{}, std::move(preparedModel), std::move(requestChannelSender),
std::move(resultChannelReceiver), std::move(burstCallback), std::move(burstContext),
std::move(memoryCache), std::move(deathHandler));
}
ExecutionBurstController::ExecutionBurstController(
- PrivateConstructorTag /*tag*/, FallbackFunction fallback,
+ PrivateConstructorTag /*tag*/, nn::SharedPreparedModel preparedModel,
std::unique_ptr<RequestChannelSender> requestChannelSender,
std::unique_ptr<ResultChannelReceiver> resultChannelReceiver,
sp<ExecutionBurstCallback> callback, sp<IBurstContext> burstContext,
std::shared_ptr<MemoryCache> memoryCache, neuralnetworks::utils::DeathHandler deathHandler)
- : kFallback(std::move(fallback)),
+ : kPreparedModel(std::move(preparedModel)),
mRequestChannelSender(std::move(requestChannelSender)),
mResultChannelReceiver(std::move(resultChannelReceiver)),
mBurstCallback(std::move(callback)),
@@ -283,26 +313,98 @@
// systraces. Note that the first point we can begin collecting systraces in
// ExecutionBurstServer is when the RequestChannelReceiver realizes there is data in the FMQ, so
// ExecutionBurstServer collects systraces at different points in the code.
- NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");
+ NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::execute");
// if the request is valid but of a higher version than what's supported in burst execution,
// fall back to another execution path
if (const auto version = NN_TRY(hal::utils::makeExecutionFailure(nn::validate(request)));
version > nn::Version::ANDROID_Q) {
// fallback to another execution path if the packet could not be sent
- if (kFallback) {
- return kFallback(request, measure, deadline, loopTimeoutDuration);
- }
- return NN_ERROR() << "Request object has features not supported by IBurst::execute";
+ return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
}
+ // ensure that request is ready for IPC
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation)));
+
// clear pools field of request, as they will be provided via slots
- const auto requestWithoutPools =
- nn::Request{.inputs = request.inputs, .outputs = request.outputs, .pools = {}};
+ const auto requestWithoutPools = nn::Request{
+ .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
auto hidlRequest = NN_TRY(
hal::utils::makeExecutionFailure(V1_0::utils::unvalidatedConvert(requestWithoutPools)));
const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
+ std::vector<int32_t> slots;
+ std::vector<OptionalCacheHold> holds;
+ slots.reserve(requestInShared.pools.size());
+ holds.reserve(requestInShared.pools.size());
+ for (const auto& memoryPool : requestInShared.pools) {
+ auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
+ slots.push_back(slot);
+ holds.push_back(std::move(hold));
+ }
+
+ // send request packet
+ const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
+ const auto fallback = [this, &request, measure, &deadline, &loopTimeoutDuration] {
+ return kPreparedModel->execute(request, measure, deadline, loopTimeoutDuration);
+ };
+ return executeInternal(requestPacket, relocation, fallback);
+}
+
+// See IBurst::createReusableExecution for information on this method.
+nn::GeneralResult<nn::SharedExecution> ExecutionBurstController::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ NNTRACE_RT(NNTRACE_PHASE_EXECUTION, "ExecutionBurstController::createReusableExecution");
+
+ // if the request is valid but of a higher version than what's supported in burst execution,
+ // fall back to another execution path
+ if (const auto version = NN_TRY(hal::utils::makeGeneralFailure(nn::validate(request)));
+ version > nn::Version::ANDROID_Q) {
+ // delegate to the prepared model's execution path for newer request versions
+ return kPreparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
+ }
+
+ // ensure that request is ready for IPC
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ // clear pools field of request, as they will be provided via slots
+ const auto requestWithoutPools = nn::Request{
+ .inputs = requestInShared.inputs, .outputs = requestInShared.outputs, .pools = {}};
+ auto hidlRequest = NN_TRY(V1_0::utils::unvalidatedConvert(requestWithoutPools));
+ const auto hidlMeasure = NN_TRY(convert(measure));
+
+ std::vector<int32_t> slots;
+ std::vector<OptionalCacheHold> holds;
+ slots.reserve(requestInShared.pools.size());
+ holds.reserve(requestInShared.pools.size());
+ for (const auto& memoryPool : requestInShared.pools) {
+ auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
+ slots.push_back(slot);
+ holds.push_back(std::move(hold));
+ }
+
+ const auto requestPacket = serialize(hidlRequest, hidlMeasure, slots);
+ return BurstExecution::create(shared_from_this(), std::move(requestPacket),
+ std::move(relocation), std::move(holds));
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ExecutionBurstController::executeInternal(const std::vector<FmqRequestDatum>& requestPacket,
+ const hal::utils::RequestRelocation& relocation,
+ FallbackFunction fallback) const {
+ NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_EXECUTION,
+ "ExecutionBurstController::executeInternal");
+
// Ensure that at most one execution is in flight at any given time.
const bool alreadyInFlight = mExecutionInFlight.test_and_set();
if (alreadyInFlight) {
@@ -310,22 +412,16 @@
}
const auto guard = base::make_scope_guard([this] { mExecutionInFlight.clear(); });
- std::vector<int32_t> slots;
- std::vector<OptionalCacheHold> holds;
- slots.reserve(request.pools.size());
- holds.reserve(request.pools.size());
- for (const auto& memoryPool : request.pools) {
- auto [slot, hold] = mMemoryCache->cacheMemory(std::get<nn::SharedMemory>(memoryPool));
- slots.push_back(slot);
- holds.push_back(std::move(hold));
+ if (relocation.input) {
+ relocation.input->flush();
}
// send request packet
- const auto sendStatus = mRequestChannelSender->send(hidlRequest, hidlMeasure, slots);
+ const auto sendStatus = mRequestChannelSender->sendPacket(requestPacket);
if (!sendStatus.ok()) {
// fallback to another execution path if the packet could not be sent
- if (kFallback) {
- return kFallback(request, measure, deadline, loopTimeoutDuration);
+ if (fallback) {
+ return fallback();
}
return NN_ERROR() << "Error sending FMQ packet: " << sendStatus.error();
}
@@ -333,7 +429,47 @@
// get result packet
const auto [status, outputShapes, timing] =
NN_TRY(hal::utils::makeExecutionFailure(mResultChannelReceiver->getBlocking()));
+
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return executionCallback(status, outputShapes, timing);
}
+nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
+ std::shared_ptr<const ExecutionBurstController> controller,
+ std::vector<FmqRequestDatum> request, hal::utils::RequestRelocation relocation,
+ std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds) {
+ if (controller == nullptr) {
+ return NN_ERROR() << "V1_2::utils::BurstExecution::create must have non-null controller";
+ }
+
+ return std::make_shared<const BurstExecution>(PrivateConstructorTag{}, std::move(controller),
+ std::move(request), std::move(relocation),
+ std::move(cacheHolds));
+}
+
+BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/,
+ std::shared_ptr<const ExecutionBurstController> controller,
+ std::vector<FmqRequestDatum> request,
+ hal::utils::RequestRelocation relocation,
+ std::vector<ExecutionBurstController::OptionalCacheHold> cacheHolds)
+ : kController(std::move(controller)),
+ kRequest(std::move(request)),
+ kRelocation(std::move(relocation)),
+ kCacheHolds(std::move(cacheHolds)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
+ const nn::OptionalTimePoint& /*deadline*/) const {
+ return kController->executeInternal(kRequest, kRelocation, /*fallback=*/nullptr);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+BurstExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
+ const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+ << "IExecution::computeFenced is not supported on burst object";
+}
+
} // namespace android::hardware::neuralnetworks::V1_2::utils
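
Note: one detail of executeInternal() worth calling out is that the burst still serializes executions with the mExecutionInFlight atomic_flag plus a scope guard, since a single request/result FMQ pair can carry only one execution at a time. A freestanding sketch of that guard, using plain RAII where the implementation uses base::make_scope_guard:

    #include <atomic>

    // RAII equivalent of the test_and_set / make_scope_guard pairing above.
    class ExecutionSlot {
      public:
        explicit ExecutionSlot(std::atomic_flag& inFlight)
            : mInFlight(inFlight), mAcquired(!inFlight.test_and_set()) {}
        ~ExecutionSlot() {
            if (mAcquired) mInFlight.clear();  // release only if this call held the slot
        }
        bool acquired() const { return mAcquired; }  // false: execution already in flight

      private:
        std::atomic_flag& mInFlight;
        const bool mAcquired;
    };
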
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index b209a44..d0ef36e 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -18,6 +18,7 @@
#include "Callbacks.h"
#include "Conversions.h"
+#include "Execution.h"
#include "ExecutionBurstController.h"
#include "ExecutionBurstUtils.h"
#include "Utils.h"
@@ -93,19 +94,32 @@
const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation)));
const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
- auto result = kExecuteSynchronously ? executeSynchronously(hidlRequest, hidlMeasure)
- : executeAsynchronously(hidlRequest, hidlMeasure);
+ return executeInternal(hidlRequest, hidlMeasure, relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeInternal(const V1_0::Request& request, MeasureTiming measure,
+ const hal::utils::RequestRelocation& relocation) const {
+ if (relocation.input) {
+ relocation.input->flush();
+ }
+
+ auto result = kExecuteSynchronously ? executeSynchronously(request, measure)
+ : executeAsynchronously(request, measure);
auto [outputShapes, timing] = NN_TRY(std::move(result));
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return std::make_pair(std::move(outputShapes), timing);
}
@@ -120,6 +134,22 @@
<< "IPreparedModel::executeFenced is not supported on 1.2 HAL service";
}
+nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ auto hidlRequest = NN_TRY(convert(requestInShared));
+ auto hidlMeasure = NN_TRY(convert(measure));
+ return Execution::create(shared_from_this(), std::move(hidlRequest), std::move(relocation),
+ hidlMeasure);
+}
+
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
auto self = shared_from_this();
auto fallback = [preparedModel = std::move(self)](
@@ -130,7 +160,7 @@
return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
};
const auto pollingTimeWindow = getBurstControllerPollingTimeWindow();
- return ExecutionBurstController::create(kPreparedModel, std::move(fallback), pollingTimeWindow);
+ return ExecutionBurstController::create(shared_from_this(), kPreparedModel, pollingTimeWindow);
}
std::any PreparedModel::getUnderlyingResource() const {
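
Note: configureExecutionBurst() now hands the burst both the canonical prepared model (for fallback) and the HIDL prepared model (for configuring the FMQ channels), rather than a pre-bound fallback closure. Because the controller and its BurstExecution objects hold shared_ptr references back up the chain, each layer keeps its parent alive, which is why these classes inherit std::enable_shared_from_this. A minimal sketch of that ownership pattern (illustrative names):

    #include <memory>
    #include <utility>

    struct Child;

    // Parent must already be owned by a shared_ptr when makeChild() runs.
    struct Parent : std::enable_shared_from_this<Parent> {
        std::shared_ptr<Child> makeChild();
    };

    struct Child {
        explicit Child(std::shared_ptr<const Parent> parent) : kParent(std::move(parent)) {}
        const std::shared_ptr<const Parent> kParent;  // extends the parent's lifetime
    };

    std::shared_ptr<Child> Parent::makeChild() {
        return std::make_shared<Child>(shared_from_this());
    }
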
diff --git a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
index d297b1a..5e2ad79 100644
--- a/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.2/utils/test/PreparedModelTest.cpp
@@ -21,6 +21,7 @@
#include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -334,6 +335,248 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
}
+TEST(PreparedModelTest, reusableExecuteSync) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(
+ Invoke(makeExecuteSynchronously(V1_0::ErrorStatus::NONE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(
+ makeExecuteSynchronously(V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsync) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(Invoke(makeExecuteAsynchronously(
+ V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::NONE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncLaunchError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteAsynchronously(V1_0::ErrorStatus::GENERAL_FAILURE,
+ V1_0::ErrorStatus::GENERAL_FAILURE, {},
+ kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncReturnError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteAsynchronously(
+ V1_0::ErrorStatus::NONE, V1_0::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncCrash) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ const auto ret = [&mockPreparedModel]() -> hardware::Return<V1_0::ErrorStatus> {
+ mockPreparedModel->simulateCrash();
+ return V1_0::ErrorStatus::NONE;
+ };
+ EXPECT_CALL(*mockPreparedModel, execute_1_2(_, _, _)).Times(1).WillOnce(InvokeWithoutArgs(ret));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedNotSupported) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
TEST(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Execution.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Execution.h
new file mode 100644
index 0000000..06c33d4
--- /dev/null
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Execution.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_EXECUTION_H
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+
+#include "PreparedModel.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
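+    // Passkey idiom: the constructor below is public so std::make_shared can call it, but
+    // only code that can name PrivateConstructorTag (in practice, create()) can construct.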
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure,
+ OptionalTimeoutDuration loopTimeoutDuration);
+
+ Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
+ Request request, hal::utils::RequestRelocation relocation,
+ V1_2::MeasureTiming measure, OptionalTimeoutDuration loopTimeoutDuration);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const PreparedModel> kPreparedModel;
+ const Request kRequest;
+ const hal::utils::RequestRelocation kRelocation;
+ const V1_2::MeasureTiming kMeasure;
+ const OptionalTimeoutDuration kLoopTimeoutDuration;
+};
+
+} // namespace android::hardware::neuralnetworks::V1_3::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_UTILS_EXECUTION_H
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
index 690fecc..5acba71 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
@@ -57,10 +57,26 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
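+    // Used by Execution to skip request validation, conversion, and relocation on each
+    // computation; the request here is already in HIDL form.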
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ executeFencedInternal(const Request& request, const hidl_vec<hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence,
+ const hal::utils::RequestRelocation& relocation) const;
+
private:
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeSynchronously(
const Request& request, V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
diff --git a/neuralnetworks/1.3/utils/src/Execution.cpp b/neuralnetworks/1.3/utils/src/Execution.cpp
new file mode 100644
index 0000000..3d17cc3
--- /dev/null
+++ b/neuralnetworks/1.3/utils/src/Execution.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Execution.h"
+
+#include "Conversions.h"
+#include "PreparedModel.h"
+#include "Utils.h"
+
+#include <android/hardware/neuralnetworks/1.0/types.h>
+#include <android/hardware/neuralnetworks/1.1/types.h>
+#include <android/hardware/neuralnetworks/1.2/types.h>
+#include <android/hardware/neuralnetworks/1.3/IPreparedModel.h>
+#include <android/hardware/neuralnetworks/1.3/types.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
+// lifetimes across processes and for protecting asynchronous calls across HIDL.
+
+namespace android::hardware::neuralnetworks::V1_3::utils {
+
+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure,
+ OptionalTimeoutDuration loopTimeoutDuration) {
+ if (preparedModel == nullptr) {
+ return NN_ERROR() << "V1_3::utils::Execution::create must have non-null preparedModel";
+ }
+
+ return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
+ std::move(request), std::move(relocation), measure,
+ std::move(loopTimeoutDuration));
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/,
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, V1_2::MeasureTiming measure,
+ OptionalTimeoutDuration loopTimeoutDuration)
+ : kPreparedModel(std::move(preparedModel)),
+ kRequest(std::move(request)),
+ kRelocation(std::move(relocation)),
+ kMeasure(measure),
+ kLoopTimeoutDuration(std::move(loopTimeoutDuration)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+ const nn::OptionalTimePoint& deadline) const {
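+    // kRequest, kMeasure, and kLoopTimeoutDuration were validated and converted when this
+    // Execution was created; only the per-call deadline needs converting here.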
+ const auto hidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+ return kPreparedModel->executeInternal(kRequest, kMeasure, hidlDeadline, kLoopTimeoutDuration,
+ kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
+ const auto hidlDeadline = NN_TRY(convert(deadline));
+ const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+ return kPreparedModel->executeFencedInternal(kRequest, hidlWaitFor, kMeasure, hidlDeadline,
+ kLoopTimeoutDuration,
+ hidlTimeoutDurationAfterFence, kRelocation);
+}
+
+} // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index fd7f8f2..1623de5 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -18,6 +18,7 @@
#include "Callbacks.h"
#include "Conversions.h"
+#include "Execution.h"
#include "Utils.h"
#include <android/hardware/neuralnetworks/1.0/types.h>
@@ -139,8 +140,11 @@
const nn::OptionalDuration& loopTimeoutDuration) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation)));
const auto hidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
const auto hidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -148,16 +152,27 @@
const auto hidlLoopTimeoutDuration =
NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+ return executeInternal(hidlRequest, hidlMeasure, hidlDeadline, hidlLoopTimeoutDuration,
+ relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeInternal(const Request& request, V1_2::MeasureTiming measure,
+ const OptionalTimePoint& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const {
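+    // relocation.input is only set when the caller's request used pointer-based memory;
+    // flushing copies that data into the shared memory that is actually sent over HIDL.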
+ if (relocation.input) {
+ relocation.input->flush();
+ }
+
auto result = kExecuteSynchronously
- ? executeSynchronously(hidlRequest, hidlMeasure, hidlDeadline,
- hidlLoopTimeoutDuration)
- : executeAsynchronously(hidlRequest, hidlMeasure, hidlDeadline,
- hidlLoopTimeoutDuration);
+ ? executeSynchronously(request, measure, deadline, loopTimeoutDuration)
+ : executeAsynchronously(request, measure, deadline, loopTimeoutDuration);
auto [outputShapes, timing] = NN_TRY(std::move(result));
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-
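+    // relocation.output copies results from shared memory back into the caller's
+    // pointer-based output buffers.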
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return std::make_pair(std::move(outputShapes), timing);
}
@@ -168,8 +183,10 @@
const nn::OptionalDuration& timeoutDurationAfterFence) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared =
- NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
const auto hidlRequest = NN_TRY(convert(requestInShared));
const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
@@ -178,27 +195,59 @@
const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+ return executeFencedInternal(hidlRequest, hidlWaitFor, hidlMeasure, hidlDeadline,
+ hidlLoopTimeoutDuration, hidlTimeoutDurationAfterFence,
+ relocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+PreparedModel::executeFencedInternal(const Request& request, const hidl_vec<hidl_handle>& waitFor,
+ V1_2::MeasureTiming measure, const OptionalTimePoint& deadline,
+ const OptionalTimeoutDuration& loopTimeoutDuration,
+ const OptionalTimeoutDuration& timeoutDurationAfterFence,
+ const hal::utils::RequestRelocation& relocation) const {
+ if (relocation.input) {
+ relocation.input->flush();
+ }
+
auto cb = hal::utils::CallbackValue(fencedExecutionCallback);
- const auto ret = kPreparedModel->executeFenced(hidlRequest, hidlWaitFor, hidlMeasure,
- hidlDeadline, hidlLoopTimeoutDuration,
- hidlTimeoutDurationAfterFence, cb);
+ const auto ret =
+ kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
+ timeoutDurationAfterFence, cb);
HANDLE_TRANSPORT_FAILURE(ret);
auto [syncFence, callback] = NN_TRY(cb.take());
// If executeFenced required the request memory to be moved into shared memory, block here until
// the fenced execution has completed and flush the memory back.
- if (maybeRequestInShared.has_value()) {
+ if (relocation.output) {
const auto state = syncFence.syncWait({});
if (state != nn::SyncFence::FenceState::SIGNALED) {
return NN_ERROR() << "syncWait failed with " << state;
}
- NN_TRY(hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared));
+ relocation.output->flush();
}
return std::make_pair(std::move(syncFence), std::move(callback));
}
+nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ auto hidlRequest = NN_TRY(convert(requestInShared));
+ auto hidlMeasure = NN_TRY(convert(measure));
+ auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
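+    // Validation, conversion, and relocation happen once here; the returned Execution
+    // reuses the converted request on every subsequent compute()/computeFenced() call.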
+ return Execution::create(shared_from_this(), std::move(hidlRequest), std::move(relocation),
+ hidlMeasure, std::move(hidlLoopTimeoutDuration));
+}
+
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
-    auto self = shared_from_this();
-    auto fallback = [preparedModel = std::move(self)](
@@ -209,7 +258,7 @@
-        return preparedModel->execute(request, measure, deadline, loopTimeoutDuration);
-    };
const auto pollingTimeWindow = V1_2::utils::getBurstControllerPollingTimeWindow();
- return V1_2::utils::ExecutionBurstController::create(kPreparedModel, std::move(fallback),
+ return V1_2::utils::ExecutionBurstController::create(shared_from_this(), kPreparedModel,
pollingTimeWindow);
}
diff --git a/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp b/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
index 5303c2a..6dbbd6b 100644
--- a/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/1.3/utils/test/PreparedModelTest.cpp
@@ -22,6 +22,7 @@
#include <android/hardware/neuralnetworks/1.3/IFencedExecutionCallback.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -462,6 +463,363 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
+TEST(PreparedModelTest, reusableExecuteSync) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(
+ Invoke(makeExecuteSynchronously(V1_3::ErrorStatus::NONE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(
+ makeExecuteSynchronously(V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsync) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(Invoke(makeExecuteAsynchronously(
+ V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::NONE, {}, kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncLaunchError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteAsynchronously(V1_3::ErrorStatus::GENERAL_FAILURE,
+ V1_3::ErrorStatus::GENERAL_FAILURE, {},
+ kNoTiming)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncReturnError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteAsynchronously(
+ V1_3::ErrorStatus::NONE, V1_3::ErrorStatus::GENERAL_FAILURE, {}, kNoTiming)));
+
+    // create execution
+    const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+    ASSERT_TRUE(createResult.has_value())
+            << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+    ASSERT_NE(createResult.value(), nullptr);
+
+    // invoke compute
+    const auto computeResult = createResult.value()->compute({});
+    ASSERT_FALSE(computeResult.has_value());
+    EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteAsyncCrash) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/false).value();
+ const auto ret = [&mockPreparedModel]() -> hardware::Return<V1_3::ErrorStatus> {
+ mockPreparedModel->simulateCrash();
+ return V1_3::ErrorStatus::NONE;
+ };
+ EXPECT_CALL(*mockPreparedModel, execute_1_3(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(ret));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteFenced) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(Invoke(makeExecuteFencedCallbackReturn(V1_3::ErrorStatus::NONE,
+ kNoTiming, kNoTiming)));
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(
+ Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ const auto& [syncFence, callback] = computeResult.value();
+ EXPECT_EQ(syncFence.syncWait({}), nn::SyncFence::FenceState::SIGNALED);
+ ASSERT_NE(callback, nullptr);
+
+ // get results from callback
+ const auto callbackResult = callback();
+ ASSERT_TRUE(callbackResult.has_value()) << "Failed with " << callbackResult.error().code
+ << ": " << callbackResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedCallbackError) {
+ // setup call
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteFencedCallbackReturn(V1_3::ErrorStatus::GENERAL_FAILURE,
+ kNoTiming, kNoTiming)));
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeExecuteFencedReturn(V1_3::ErrorStatus::NONE, {}, mockCallback)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code << ": "
+ << computeResult.error().message;
+ const auto& [syncFence, callback] = computeResult.value();
+ EXPECT_NE(syncFence.syncWait({}), nn::SyncFence::FenceState::ACTIVE);
+ ASSERT_NE(callback, nullptr);
+
+ // verify callback failure
+ const auto callbackResult = callback();
+ ASSERT_FALSE(callbackResult.has_value());
+ EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedError) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(
+ makeExecuteFencedReturn(V1_3::ErrorStatus::GENERAL_FAILURE, {}, nullptr)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedDeadObject) {
+ // setup test
+ const auto mockPreparedModel = createMockPreparedModel();
+ const auto preparedModel =
+ PreparedModel::create(mockPreparedModel, /*executeSynchronously=*/true).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
TEST(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
index 008e4e4..0cc78d4 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Burst.h
@@ -38,7 +38,7 @@
namespace aidl::android::hardware::neuralnetworks::utils {
// Class that adapts aidl_hal::IBurst to nn::IBurst.
-class Burst final : public nn::IBurst {
+class Burst final : public nn::IBurst, public std::enable_shared_from_this<Burst> {
struct PrivateConstructorTag {};
public:
@@ -100,6 +100,16 @@
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const override;
+ // See IBurst::createReusableExecution for information.
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
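+    // Reused by the execution object returned from createReusableExecution; the request
+    // is already converted and the memory identifier tokens are already resolved.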
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const aidl_hal::Request& request, const std::vector<int64_t>& memoryIdentifierTokens,
+ bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const;
+
private:
mutable std::atomic_flag mExecutionInFlight = ATOMIC_FLAG_INIT;
const std::shared_ptr<aidl_hal::IBurst> kBurst;
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
new file mode 100644
index 0000000..a77ea98
--- /dev/null
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Execution.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+
+#include "PreparedModel.h"
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+class Execution final : public nn::IExecution, public std::enable_shared_from_this<Execution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const Execution>> create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration);
+
+ Execution(PrivateConstructorTag tag, std::shared_ptr<const PreparedModel> preparedModel,
+ Request request, hal::utils::RequestRelocation relocation, bool measure,
+ int64_t loopTimeoutDuration);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const PreparedModel> kPreparedModel;
+ const Request kRequest;
+ const hal::utils::RequestRelocation kRelocation;
+ const bool kMeasure;
+ const int64_t kLoopTimeoutDuration;
+};
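+
+// Typical usage (sketch; the request, measure, and timeout values are illustrative):
+//   auto execution = preparedModel->createReusableExecution(request, measure, timeout);
+//   auto outputs = execution.value()->compute(deadline);  // may be invoked repeatedly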
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_EXECUTION_H
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
index abce6cc..4035764 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/PreparedModel.h
@@ -18,6 +18,7 @@
#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_PREPARED_MODEL_H
#include <aidl/android/hardware/neuralnetworks/IPreparedModel.h>
+#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>
@@ -34,7 +35,8 @@
namespace aidl::android::hardware::neuralnetworks::utils {
// Class that adapts aidl_hal::IPreparedModel to nn::IPreparedModel.
-class PreparedModel final : public nn::IPreparedModel {
+class PreparedModel final : public nn::IPreparedModel,
+ public std::enable_shared_from_this<PreparedModel> {
struct PrivateConstructorTag {};
public:
@@ -55,10 +57,25 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executeInternal(
+ const Request& request, bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ executeFencedInternal(const Request& request,
+ const std::vector<ndk::ScopedFileDescriptor>& waitFor, bool measure,
+ int64_t deadline, int64_t loopTimeoutDuration,
+ int64_t timeoutDurationAfterFence,
+ const hal::utils::RequestRelocation& relocation) const;
+
private:
const std::shared_ptr<aidl_hal::IPreparedModel> kPreparedModel;
};
diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp
index 0b475bc..87cd0e4 100644
--- a/neuralnetworks/aidl/utils/src/Burst.cpp
+++ b/neuralnetworks/aidl/utils/src/Burst.cpp
@@ -22,6 +22,7 @@
#include <android-base/logging.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IBurst.h>
+#include <nnapi/IExecution.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -35,6 +36,39 @@
namespace aidl::android::hardware::neuralnetworks::utils {
namespace {
+class BurstExecution final : public nn::IExecution,
+ public std::enable_shared_from_this<BurstExecution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ static nn::GeneralResult<std::shared_ptr<const BurstExecution>> create(
+ std::shared_ptr<const Burst> burst, Request request,
+ std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
+ hal::utils::RequestRelocation relocation,
+ std::vector<Burst::OptionalCacheHold> cacheHolds);
+
+ BurstExecution(PrivateConstructorTag tag, std::shared_ptr<const Burst> burst, Request request,
+ std::vector<int64_t> memoryIdentifierTokens, bool measure,
+ int64_t loopTimeoutDuration, hal::utils::RequestRelocation relocation,
+ std::vector<Burst::OptionalCacheHold> cacheHolds);
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ const std::shared_ptr<const Burst> kBurst;
+ const Request kRequest;
+    const std::vector<int64_t> kMemoryIdentifierTokens;
+ const bool kMeasure;
+ const int64_t kLoopTimeoutDuration;
+ const hal::utils::RequestRelocation kRelocation;
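+    // Holding the cache entries keeps kMemoryIdentifierTokens valid for the lifetime of
+    // this reusable execution.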
+ const std::vector<Burst::OptionalCacheHold> kCacheHolds;
+};
+
nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> convertExecutionResults(
const std::vector<OutputShape>& outputShapes, const Timing& timing) {
return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
@@ -139,17 +173,13 @@
const nn::Request& request, nn::MeasureTiming measure,
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const {
- // Ensure that at most one execution is in flight at any given time.
- const bool alreadyInFlight = mExecutionInFlight.test_and_set();
- if (alreadyInFlight) {
- return NN_ERROR() << "IBurst already has an execution in flight";
- }
- const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); });
-
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+ &maybeRequestInShared, &relocation)));
const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
@@ -159,9 +189,9 @@
std::vector<int64_t> memoryIdentifierTokens;
std::vector<OptionalCacheHold> holds;
- memoryIdentifierTokens.reserve(request.pools.size());
- holds.reserve(request.pools.size());
- for (const auto& memoryPool : request.pools) {
+ memoryIdentifierTokens.reserve(requestInShared.pools.size());
+ holds.reserve(requestInShared.pools.size());
+ for (const auto& memoryPool : requestInShared.pools) {
if (const auto* memory = std::get_if<nn::SharedMemory>(&memoryPool)) {
if (auto cached = kMemoryCache->getMemoryIfAvailable(*memory)) {
auto& [identifier, hold] = *cached;
@@ -172,12 +202,30 @@
}
memoryIdentifierTokens.push_back(-1);
}
- CHECK_EQ(request.pools.size(), memoryIdentifierTokens.size());
+ CHECK_EQ(requestInShared.pools.size(), memoryIdentifierTokens.size());
+
+ return executeInternal(aidlRequest, memoryIdentifierTokens, aidlMeasure, aidlDeadline,
+ aidlLoopTimeoutDuration, relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Burst::executeInternal(
+ const Request& request, const std::vector<int64_t>& memoryIdentifierTokens, bool measure,
+ int64_t deadline, int64_t loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const {
+ // Ensure that at most one execution is in flight at any given time.
+ const bool alreadyInFlight = mExecutionInFlight.test_and_set();
+ if (alreadyInFlight) {
+ return NN_ERROR() << "IBurst already has an execution in flight";
+ }
+ const auto guard = ::android::base::make_scope_guard([this] { mExecutionInFlight.clear(); });
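+    // The guard lives in executeInternal so that both Burst::execute and executions
+    // created by createReusableExecution are limited to one computation in flight.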
+
+ if (relocation.input) {
+ relocation.input->flush();
+ }
ExecutionResult executionResult;
- const auto ret =
- kBurst->executeSynchronously(aidlRequest, memoryIdentifierTokens, aidlMeasure,
- aidlDeadline, aidlLoopTimeoutDuration, &executionResult);
+ const auto ret = kBurst->executeSynchronously(request, memoryIdentifierTokens, measure,
+ deadline, loopTimeoutDuration, &executionResult);
HANDLE_ASTATUS(ret) << "execute failed";
if (!executionResult.outputSufficientSize) {
auto canonicalOutputShapes =
@@ -188,10 +236,89 @@
auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return std::make_pair(std::move(outputShapes), timing);
}
+nn::GeneralResult<nn::SharedExecution> Burst::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ auto aidlRequest = NN_TRY(convert(requestInShared));
+ const auto aidlMeasure = NN_TRY(convert(measure));
+ const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
+
+ std::vector<int64_t> memoryIdentifierTokens;
+ std::vector<OptionalCacheHold> holds;
+ memoryIdentifierTokens.reserve(requestInShared.pools.size());
+ holds.reserve(requestInShared.pools.size());
+ for (const auto& memoryPool : requestInShared.pools) {
+ if (const auto* memory = std::get_if<nn::SharedMemory>(&memoryPool)) {
+ if (auto cached = kMemoryCache->getMemoryIfAvailable(*memory)) {
+ auto& [identifier, hold] = *cached;
+ memoryIdentifierTokens.push_back(identifier);
+ holds.push_back(std::move(hold));
+ continue;
+ }
+ }
+ memoryIdentifierTokens.push_back(-1);
+ }
+ CHECK_EQ(requestInShared.pools.size(), memoryIdentifierTokens.size());
+
+ return BurstExecution::create(shared_from_this(), std::move(aidlRequest),
+ std::move(memoryIdentifierTokens), aidlMeasure,
+ aidlLoopTimeoutDuration, std::move(relocation), std::move(holds));
+}
+
+nn::GeneralResult<std::shared_ptr<const BurstExecution>> BurstExecution::create(
+ std::shared_ptr<const Burst> burst, Request request,
+ std::vector<int64_t> memoryIdentifierTokens, bool measure, int64_t loopTimeoutDuration,
+ hal::utils::RequestRelocation relocation,
+ std::vector<Burst::OptionalCacheHold> cacheHolds) {
+ if (burst == nullptr) {
+ return NN_ERROR() << "aidl::utils::BurstExecution::create must have non-null burst";
+ }
+
+ return std::make_shared<const BurstExecution>(
+ PrivateConstructorTag{}, std::move(burst), std::move(request),
+ std::move(memoryIdentifierTokens), measure, loopTimeoutDuration, std::move(relocation),
+ std::move(cacheHolds));
+}
+
+BurstExecution::BurstExecution(PrivateConstructorTag /*tag*/, std::shared_ptr<const Burst> burst,
+ Request request, std::vector<int64_t> memoryIdentifierTokens,
+ bool measure, int64_t loopTimeoutDuration,
+ hal::utils::RequestRelocation relocation,
+ std::vector<Burst::OptionalCacheHold> cacheHolds)
+ : kBurst(std::move(burst)),
+ kRequest(std::move(request)),
+ kMemoryIdentifierTokens(std::move(memoryIdentifierTokens)),
+ kMeasure(measure),
+ kLoopTimeoutDuration(loopTimeoutDuration),
+ kRelocation(std::move(relocation)),
+ kCacheHolds(std::move(cacheHolds)) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> BurstExecution::compute(
+ const nn::OptionalTimePoint& deadline) const {
+ const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+ return kBurst->executeInternal(kRequest, kMemoryIdentifierTokens, kMeasure, aidlDeadline,
+ kLoopTimeoutDuration, kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+BurstExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
+ const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+ << "IExecution::computeFenced is not supported on burst object";
+}
+
} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/src/Execution.cpp b/neuralnetworks/aidl/utils/src/Execution.cpp
new file mode 100644
index 0000000..2aee8a6
--- /dev/null
+++ b/neuralnetworks/aidl/utils/src/Execution.cpp
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Execution.h"
+
+#include "Conversions.h"
+#include "PreparedModel.h"
+#include "Utils.h"
+
+#include <aidl/android/hardware/neuralnetworks/Request.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/CommonUtils.h>
+#include <nnapi/hal/HandleError.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+// See hardware/interfaces/neuralnetworks/utils/README.md for more information on AIDL interface
+// lifetimes across processes and for protecting asynchronous calls across AIDL.
+
+namespace aidl::android::hardware::neuralnetworks::utils {
+
+nn::GeneralResult<std::shared_ptr<const Execution>> Execution::create(
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, bool measure, int64_t loopTimeoutDuration) {
+ if (preparedModel == nullptr) {
+ return NN_ERROR() << "aidl::utils::Execution::create must have non-null preparedModel";
+ }
+
+ return std::make_shared<const Execution>(PrivateConstructorTag{}, std::move(preparedModel),
+ std::move(request), std::move(relocation), measure,
+ loopTimeoutDuration);
+}
+
+Execution::Execution(PrivateConstructorTag /*tag*/,
+ std::shared_ptr<const PreparedModel> preparedModel, Request request,
+ hal::utils::RequestRelocation relocation, bool measure,
+ int64_t loopTimeoutDuration)
+ : kPreparedModel(std::move(preparedModel)),
+ kRequest(std::move(request)),
+ kRelocation(std::move(relocation)),
+ kMeasure(measure),
+ kLoopTimeoutDuration(loopTimeoutDuration) {}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> Execution::compute(
+ const nn::OptionalTimePoint& deadline) const {
+ const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
+ return kPreparedModel->executeInternal(kRequest, kMeasure, aidlDeadline, kLoopTimeoutDuration,
+ kRelocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ const auto aidlWaitFor = NN_TRY(convert(waitFor));
+ const auto aidlDeadline = NN_TRY(convert(deadline));
+ const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+ return kPreparedModel->executeFencedInternal(kRequest, aidlWaitFor, kMeasure, aidlDeadline,
+ kLoopTimeoutDuration,
+ aidlTimeoutDurationAfterFence, kRelocation);
+}
+
+} // namespace aidl::android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index 003965b..18e7636 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -19,8 +19,11 @@
#include "Burst.h"
#include "Callbacks.h"
#include "Conversions.h"
+#include "Execution.h"
+#include "ProtectCallback.h"
#include "Utils.h"
+#include <aidl/android/hardware/neuralnetworks/Request.h>
#include <android/binder_auto_utils.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
@@ -74,18 +77,32 @@
const nn::OptionalDuration& loopTimeoutDuration) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared = NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared)));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared =
+ NN_TRY(hal::utils::makeExecutionFailure(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+ &maybeRequestInShared, &relocation)));
const auto aidlRequest = NN_TRY(hal::utils::makeExecutionFailure(convert(requestInShared)));
const auto aidlMeasure = NN_TRY(hal::utils::makeExecutionFailure(convert(measure)));
const auto aidlDeadline = NN_TRY(hal::utils::makeExecutionFailure(convert(deadline)));
const auto aidlLoopTimeoutDuration =
NN_TRY(hal::utils::makeExecutionFailure(convert(loopTimeoutDuration)));
+ return executeInternal(aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration,
+ relocation);
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+PreparedModel::executeInternal(const Request& request, bool measure, int64_t deadline,
+ int64_t loopTimeoutDuration,
+ const hal::utils::RequestRelocation& relocation) const {
+ if (relocation.input) {
+ relocation.input->flush();
+ }
ExecutionResult executionResult;
- const auto ret = kPreparedModel->executeSynchronously(
- aidlRequest, aidlMeasure, aidlDeadline, aidlLoopTimeoutDuration, &executionResult);
+ const auto ret = kPreparedModel->executeSynchronously(request, measure, deadline,
+ loopTimeoutDuration, &executionResult);
HANDLE_ASTATUS(ret) << "executeSynchronously failed";
if (!executionResult.outputSufficientSize) {
auto canonicalOutputShapes =
@@ -96,9 +113,9 @@
auto [outputShapes, timing] = NN_TRY(hal::utils::makeExecutionFailure(
convertExecutionResults(executionResult.outputShapes, executionResult.timing)));
- NN_TRY(hal::utils::makeExecutionFailure(
- hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared)));
-
+ if (relocation.output) {
+ relocation.output->flush();
+ }
return std::make_pair(std::move(outputShapes), timing);
}
@@ -109,8 +126,10 @@
const nn::OptionalDuration& timeoutDurationAfterFence) const {
// Ensure that request is ready for IPC.
std::optional<nn::Request> maybeRequestInShared;
- const nn::Request& requestInShared =
- NN_TRY(hal::utils::flushDataFromPointerToShared(&request, &maybeRequestInShared));
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+ &maybeRequestInShared, &relocation));
const auto aidlRequest = NN_TRY(convert(requestInShared));
const auto aidlWaitFor = NN_TRY(convert(waitFor));
@@ -118,11 +137,25 @@
const auto aidlDeadline = NN_TRY(convert(deadline));
const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
const auto aidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
+ return executeFencedInternal(aidlRequest, aidlWaitFor, aidlMeasure, aidlDeadline,
+ aidlLoopTimeoutDuration, aidlTimeoutDurationAfterFence,
+ relocation);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+PreparedModel::executeFencedInternal(const Request& request,
+ const std::vector<ndk::ScopedFileDescriptor>& waitFor,
+ bool measure, int64_t deadline, int64_t loopTimeoutDuration,
+ int64_t timeoutDurationAfterFence,
+ const hal::utils::RequestRelocation& relocation) const {
+ if (relocation.input) {
+ relocation.input->flush();
+ }
FencedExecutionResult result;
- const auto ret = kPreparedModel->executeFenced(aidlRequest, aidlWaitFor, aidlMeasure,
- aidlDeadline, aidlLoopTimeoutDuration,
- aidlTimeoutDurationAfterFence, &result);
+ const auto ret =
+ kPreparedModel->executeFenced(request, waitFor, measure, deadline, loopTimeoutDuration,
+ timeoutDurationAfterFence, &result);
HANDLE_ASTATUS(ret) << "executeFenced failed";
auto resultSyncFence = nn::SyncFence::createAsSignaled();
@@ -137,12 +170,12 @@
// If executeFenced required the request memory to be moved into shared memory, block here until
// the fenced execution has completed and flush the memory back.
- if (maybeRequestInShared.has_value()) {
+ if (relocation.output) {
const auto state = resultSyncFence.syncWait({});
if (state != nn::SyncFence::FenceState::SIGNALED) {
return NN_ERROR() << "syncWait failed with " << state;
}
- NN_TRY(hal::utils::unflushDataFromSharedToPointer(request, maybeRequestInShared));
+ relocation.output->flush();
}
// Create callback which can be used to retrieve the execution error status and timings.
@@ -159,6 +192,23 @@
return std::make_pair(std::move(resultSyncFence), std::move(resultCallback));
}
+nn::GeneralResult<nn::SharedExecution> PreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ // Ensure that request is ready for IPC.
+ std::optional<nn::Request> maybeRequestInShared;
+ hal::utils::RequestRelocation relocation;
+ const nn::Request& requestInShared = NN_TRY(hal::utils::convertRequestFromPointerToShared(
+ &request, nn::kDefaultRequestMemoryAlignment, nn::kDefaultRequestMemoryPadding,
+ &maybeRequestInShared, &relocation));
+
+ auto aidlRequest = NN_TRY(convert(requestInShared));
+ auto aidlMeasure = NN_TRY(convert(measure));
+ auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
+ return Execution::create(shared_from_this(), std::move(aidlRequest), std::move(relocation),
+ aidlMeasure, aidlLoopTimeoutDuration);
+}
+
nn::GeneralResult<nn::SharedBurst> PreparedModel::configureExecutionBurst() const {
std::shared_ptr<IBurst> burst;
const auto ret = kPreparedModel->configureExecutionBurst(&burst);
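
A note on the new flow above: execute() and executeFenced() now only convert their arguments and then delegate to executeInternal()/executeFencedInternal(), which perform the relocation flushes around the actual driver call. The sketch below distills the pattern; callDriver and Result are hypothetical placeholders, not names from this change.

    // Minimal sketch of the relocation pattern (placeholder names).
    template <typename Result, typename Fn>
    Result runWithRelocation(const hal::utils::RequestRelocation& relocation, const Fn& callDriver) {
        if (relocation.input) {
            relocation.input->flush();   // copy caller pointer data into the shared pool
        }
        Result result = callDriver();    // IPC against the pool-backed request
        if (relocation.output) {
            relocation.output->flush();  // copy results from the pool back to caller pointers
        }
        return result;
    }

For the fenced path the output flush must wait until the sync fence signals, which is exactly what executeFencedInternal() does above.
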
diff --git a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
index ff98a7d..8bb5c90 100644
--- a/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
+++ b/neuralnetworks/aidl/utils/test/PreparedModelTest.cpp
@@ -21,6 +21,7 @@
#include <aidl/android/hardware/neuralnetworks/IFencedExecutionCallback.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
#include <nnapi/IPreparedModel.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -253,6 +254,225 @@
EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
}
+TEST(PreparedModelTest, reusableExecuteSync) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto mockExecutionResult = ExecutionResult{
+ .outputSufficientSize = true,
+ .outputShapes = {},
+ .timing = kNoTiming,
+ };
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(
+ DoAll(SetArgPointee<4>(mockExecutionResult), InvokeWithoutArgs(makeStatusOk)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->compute({});
+ EXPECT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncError) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeGeneralFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteSyncDeadObject) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeSynchronously(_, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->compute({});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(PreparedModelTest, reusableExecuteFenced) {
+ // setup call
+ const uint32_t kNumberOfComputations = 2;
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+ SetArgPointee<2>(ErrorStatus::NONE), Invoke(makeStatusOk)));
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(kNumberOfComputations)
+ .WillRepeatedly(Invoke(makeFencedExecutionResult(mockCallback)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute repeatedly
+ for (uint32_t i = 0; i < kNumberOfComputations; i++) {
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code
+ << ": " << computeResult.error().message;
+ const auto& [syncFence, callback] = computeResult.value();
+ EXPECT_EQ(syncFence.syncWait({}), nn::SyncFence::FenceState::SIGNALED);
+ ASSERT_NE(callback, nullptr);
+
+ // get results from callback
+ const auto callbackResult = callback();
+ ASSERT_TRUE(callbackResult.has_value()) << "Failed with " << callbackResult.error().code
+ << ": " << callbackResult.error().message;
+ }
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedCallbackError) {
+ // setup call
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ const auto mockCallback = MockFencedExecutionCallback::create();
+ EXPECT_CALL(*mockCallback, getExecutionInfo(_, _, _))
+ .Times(1)
+ .WillOnce(Invoke(DoAll(SetArgPointee<0>(kNoTiming), SetArgPointee<1>(kNoTiming),
+ SetArgPointee<2>(ErrorStatus::GENERAL_FAILURE),
+ Invoke(makeStatusOk))));
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(Invoke(makeFencedExecutionResult(mockCallback)));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_TRUE(computeResult.has_value()) << "Failed with " << computeResult.error().code << ": "
+ << computeResult.error().message;
+ const auto& [syncFence, callback] = computeResult.value();
+ EXPECT_NE(syncFence.syncWait({}), nn::SyncFence::FenceState::ACTIVE);
+ ASSERT_NE(callback, nullptr);
+
+ // verify callback failure
+ const auto callbackResult = callback();
+ ASSERT_FALSE(callbackResult.has_value());
+ EXPECT_EQ(callbackResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedError) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedTransportFailure) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeGeneralTransportFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(PreparedModelTest, reusableExecuteFencedDeadObject) {
+ // setup test
+ const auto mockPreparedModel = MockPreparedModel::create();
+ const auto preparedModel = PreparedModel::create(mockPreparedModel).value();
+ EXPECT_CALL(*mockPreparedModel, executeFenced(_, _, _, _, _, _, _))
+ .Times(1)
+ .WillOnce(InvokeWithoutArgs(makeDeadObjectFailure));
+
+ // create execution
+ const auto createResult = preparedModel->createReusableExecution({}, {}, {});
+ ASSERT_TRUE(createResult.has_value())
+ << "Failed with " << createResult.error().code << ": " << createResult.error().message;
+ ASSERT_NE(createResult.value(), nullptr);
+
+ // invoke compute
+ const auto computeResult = createResult.value()->computeFenced({}, {}, {});
+ ASSERT_FALSE(computeResult.has_value());
+ EXPECT_EQ(computeResult.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
TEST(PreparedModelTest, configureExecutionBurst) {
// setup test
const auto mockPreparedModel = MockPreparedModel::create();
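
Usage note for the reusable-execution tests above: the request is converted and relocated once at creation time, and each subsequent compute()/computeFenced() only re-flushes the relocation buffers. Roughly, from the client side (a sketch; request, measure, and loopTimeoutDuration stand for the caller's values):

    // Create once, compute many times (sketch).
    const auto created = preparedModel->createReusableExecution(request, measure, loopTimeoutDuration);
    if (created.has_value()) {
        const nn::SharedExecution execution = created.value();
        for (int i = 0; i < 2; ++i) {
            const auto result = execution->compute(/*deadline=*/{});  // conversion cost paid once
            // ... check result ...
        }
    }
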
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 8fe6b90..702ee92 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -20,6 +20,7 @@
#include <cutils/native_handle.h>
#include <hidl/HidlSupport.h>
#include <nnapi/Result.h>
+#include <nnapi/SharedMemory.h>
#include <nnapi/Types.h>
#include <functional>
#include <vector>
@@ -59,19 +60,70 @@
nn::GeneralResult<std::reference_wrapper<const nn::Model>> flushDataFromPointerToShared(
const nn::Model* model, std::optional<nn::Model>* maybeModelInSharedOut);
+// Record a relocation mapping between pointer-based data and shared memory.
+// Only two specializations of this template may exist:
+// - RelocationInfo<const void*> for request inputs
+// - RelocationInfo<void*> for request outputs
+template <typename PointerType>
+struct RelocationInfo {
+ PointerType data;
+ size_t length;
+ size_t offset;
+};
+using InputRelocationInfo = RelocationInfo<const void*>;
+using OutputRelocationInfo = RelocationInfo<void*>;
+
+// Keep track of the relocation mapping between pointer-based data and the shared memory pool,
+// and provide a method to copy the data between pointers and the shared memory pool.
+// Only two specializations of this template may exist:
+// - RelocationTracker<InputRelocationInfo> for request inputs
+// - RelocationTracker<OutputRelocationInfo> for request outputs
+template <typename RelocationInfoType>
+class RelocationTracker {
+ public:
+ static nn::GeneralResult<std::unique_ptr<RelocationTracker>> create(
+ std::vector<RelocationInfoType> relocationInfos, nn::SharedMemory memory) {
+ auto mapping = NN_TRY(map(memory));
+ return std::make_unique<RelocationTracker<RelocationInfoType>>(
+ std::move(relocationInfos), std::move(memory), std::move(mapping));
+ }
+
+ RelocationTracker(std::vector<RelocationInfoType> relocationInfos, nn::SharedMemory memory,
+ nn::Mapping mapping)
+ : kRelocationInfos(std::move(relocationInfos)),
+ kMemory(std::move(memory)),
+ kMapping(std::move(mapping)) {}
+
+ // Specializations defined in CommonUtils.cpp.
+ // For InputRelocationTracker, this method will copy pointer data to the shared memory pool.
+ // For OutputRelocationTracker, this method will copy shared memory data to the pointers.
+ void flush() const;
+
+ private:
+ const std::vector<RelocationInfoType> kRelocationInfos;
+ const nn::SharedMemory kMemory;
+ const nn::Mapping kMapping;
+};
+using InputRelocationTracker = RelocationTracker<InputRelocationInfo>;
+using OutputRelocationTracker = RelocationTracker<OutputRelocationInfo>;
+
+struct RequestRelocation {
+ std::unique_ptr<InputRelocationTracker> input;
+ std::unique_ptr<OutputRelocationTracker> output;
+};
+
// Relocate pointer-based data to shared memory. If `request` has no
// Request::Argument::LifeTime::POINTER data, the function returns with a reference to `request`. If
// `request` has Request::Argument::LifeTime::POINTER data, the request is copied to
// `maybeRequestInSharedOut` with the POINTER data relocated to a memory pool, and the function
-// returns with a reference to `*maybeRequestInSharedOut`.
-nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
- const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut);
-
-// Undoes `flushDataFromPointerToShared` on a Request object. More specifically,
-// `unflushDataFromSharedToPointer` copies the output shared memory data from the transformed
-// Request object back to the output pointer-based memory in the original Request object.
-nn::GeneralResult<void> unflushDataFromSharedToPointer(
- const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared);
+// returns with a reference to `*maybeRequestInSharedOut`. On success, `relocationOut` is set to
+// track the input and output relocations.
+//
+// Unlike `flushDataFromPointerToShared`, this method does not copy the input pointer data to the
+// shared memory pool. Use `relocationOut` to flush the input data before the call to the driver
+// and the output data after it.
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
+ const nn::Request* request, uint32_t alignment, uint32_t padding,
+ std::optional<nn::Request>* maybeRequestInSharedOut, RequestRelocation* relocationOut);
nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
size_t numberOfOperands, const std::vector<nn::Operation>& operations);
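
To make the new types concrete: each RelocationInfo ties one POINTER argument to its span in the shared pool, and the tracker owns the pool mapping and performs the copies in flush(). An illustrative fragment (the buffer names, sizes, and `pool` are assumptions, not part of this change):

    // Two pointer-based inputs packed into one pool (illustrative offsets).
    const std::vector<hal::utils::InputRelocationInfo> infos = {
        {/*data=*/inputA, /*length=*/256, /*offset=*/0},
        {/*data=*/inputB, /*length=*/100, /*offset=*/256},
    };
    auto tracker = hal::utils::InputRelocationTracker::create(infos, pool);
    if (tracker.has_value()) {
        tracker.value()->flush();  // memcpy each span from its pointer into the mapped pool
    }
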
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
index 17b3fd9..e86edda 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBurst.h
@@ -35,6 +35,10 @@
const nn::Request& request, nn::MeasureTiming measure,
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const override;
+
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
};
} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidExecution.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidExecution.h
new file mode 100644
index 0000000..5b00221
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidExecution.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class InvalidExecution final : public nn::IExecution {
+ public:
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+};
+
+} // namespace android::hardware::neuralnetworks::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_EXECUTION_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
index 3e1dca7..de30aae 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
@@ -40,6 +40,10 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
index c92cc41..fde2486 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientBurst.h
@@ -51,7 +51,16 @@
const nn::OptionalTimePoint& deadline,
const nn::OptionalDuration& loopTimeoutDuration) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
private:
+ bool isValidInternal() const EXCLUDES(mMutex);
+ nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const;
+
const Factory kMakeBurst;
mutable std::mutex mMutex;
mutable nn::SharedBurst mBurst GUARDED_BY(mMutex);
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientExecution.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientExecution.h
new file mode 100644
index 0000000..d0084e8
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientExecution.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
+
+#include <android-base/thread_annotations.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class ResilientExecution final : public nn::IExecution,
+ public std::enable_shared_from_this<ResilientExecution> {
+ struct PrivateConstructorTag {};
+
+ public:
+ using Factory = std::function<nn::GeneralResult<nn::SharedExecution>()>;
+
+ static nn::GeneralResult<std::shared_ptr<const ResilientExecution>> create(
+ Factory makeExecution);
+
+ ResilientExecution(PrivateConstructorTag tag, Factory makeExecution,
+ nn::SharedExecution execution);
+
+ nn::SharedExecution getExecution() const;
+ nn::GeneralResult<nn::SharedExecution> recover(const nn::IExecution* failingExecution) const;
+
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> compute(
+ const nn::OptionalTimePoint& deadline) const override;
+
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> computeFenced(
+ const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+
+ private:
+ bool isValidInternal() const EXCLUDES(mMutex);
+
+ const Factory kMakeExecution;
+ mutable std::mutex mMutex;
+ mutable nn::SharedExecution mExecution GUARDED_BY(mMutex);
+};
+
+} // namespace android::hardware::neuralnetworks::utils
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_RESILIENT_EXECUTION_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
index a6c1b19..86533ed 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientPreparedModel.h
@@ -58,12 +58,19 @@
const nn::OptionalDuration& loopTimeoutDuration,
const nn::OptionalDuration& timeoutDurationAfterFence) const override;
+ nn::GeneralResult<nn::SharedExecution> createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const override;
+
nn::GeneralResult<nn::SharedBurst> configureExecutionBurst() const override;
std::any getUnderlyingResource() const override;
private:
bool isValidInternal() const EXCLUDES(mMutex);
+ nn::GeneralResult<nn::SharedExecution> createReusableExecutionInternal(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const;
nn::GeneralResult<nn::SharedBurst> configureExecutionBurstInternal() const;
const Factory kMakePreparedModel;
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 4d26795..8e55bf0 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -200,10 +200,31 @@
return **maybeModelInSharedOut;
}
-nn::GeneralResult<std::reference_wrapper<const nn::Request>> flushDataFromPointerToShared(
- const nn::Request* request, std::optional<nn::Request>* maybeRequestInSharedOut) {
+template <>
+void InputRelocationTracker::flush() const {
+ // Copy from pointers to shared memory.
+ uint8_t* memoryPtr = static_cast<uint8_t*>(std::get<void*>(kMapping.pointer));
+ for (const auto& [data, length, offset] : kRelocationInfos) {
+ std::memcpy(memoryPtr + offset, data, length);
+ }
+}
+
+template <>
+void OutputRelocationTracker::flush() const {
+ // Copy from shared memory to pointers.
+ const uint8_t* memoryPtr = static_cast<const uint8_t*>(
+ std::visit([](auto ptr) { return static_cast<const void*>(ptr); }, kMapping.pointer));
+ for (const auto& [data, length, offset] : kRelocationInfos) {
+ std::memcpy(data, memoryPtr + offset, length);
+ }
+}
+
+nn::GeneralResult<std::reference_wrapper<const nn::Request>> convertRequestFromPointerToShared(
+ const nn::Request* request, uint32_t alignment, uint32_t padding,
+ std::optional<nn::Request>* maybeRequestInSharedOut, RequestRelocation* relocationOut) {
CHECK(request != nullptr);
CHECK(maybeRequestInSharedOut != nullptr);
+ CHECK(relocationOut != nullptr);
if (hasNoPointerData(*request)) {
return *request;
@@ -213,8 +234,11 @@
// to the caller through `maybeRequestInSharedOut` if the function succeeds.
nn::Request requestInShared = *request;
+ RequestRelocation relocation;
+
// Change input pointers to shared memory.
- nn::ConstantMemoryBuilder inputBuilder(requestInShared.pools.size());
+ nn::MutableMemoryBuilder inputBuilder(requestInShared.pools.size());
+ std::vector<InputRelocationInfo> inputRelocationInfos;
for (auto& input : requestInShared.inputs) {
const auto& location = input.location;
if (input.lifetime != nn::Request::Argument::LifeTime::POINTER) {
@@ -225,17 +249,21 @@
const void* data = std::visit([](auto ptr) { return static_cast<const void*>(ptr); },
location.pointer);
CHECK(data != nullptr);
- input.location = inputBuilder.append(data, location.length);
+ input.location = inputBuilder.append(location.length, alignment, padding);
+ inputRelocationInfos.push_back({data, input.location.length, input.location.offset});
}
// Allocate input memory.
if (!inputBuilder.empty()) {
auto memory = NN_TRY(inputBuilder.finish());
- requestInShared.pools.push_back(std::move(memory));
+ requestInShared.pools.push_back(memory);
+ relocation.input = NN_TRY(
+ InputRelocationTracker::create(std::move(inputRelocationInfos), std::move(memory)));
}
// Change output pointers to shared memory.
nn::MutableMemoryBuilder outputBuilder(requestInShared.pools.size());
+ std::vector<OutputRelocationInfo> outputRelocationInfos;
for (auto& output : requestInShared.outputs) {
const auto& location = output.location;
if (output.lifetime != nn::Request::Argument::LifeTime::POINTER) {
@@ -243,62 +271,25 @@
}
output.lifetime = nn::Request::Argument::LifeTime::POOL;
- output.location = outputBuilder.append(location.length);
+ void* data = std::get<void*>(location.pointer);
+ CHECK(data != nullptr);
+ output.location = outputBuilder.append(location.length, alignment, padding);
+ outputRelocationInfos.push_back({data, output.location.length, output.location.offset});
}
// Allocate output memory.
if (!outputBuilder.empty()) {
auto memory = NN_TRY(outputBuilder.finish());
- requestInShared.pools.push_back(std::move(memory));
+ requestInShared.pools.push_back(memory);
+ relocation.output = NN_TRY(OutputRelocationTracker::create(std::move(outputRelocationInfos),
+ std::move(memory)));
}
*maybeRequestInSharedOut = requestInShared;
+ *relocationOut = std::move(relocation);
return **maybeRequestInSharedOut;
}
-nn::GeneralResult<void> unflushDataFromSharedToPointer(
- const nn::Request& request, const std::optional<nn::Request>& maybeRequestInShared) {
- if (!maybeRequestInShared.has_value() || maybeRequestInShared->pools.empty() ||
- !std::holds_alternative<nn::SharedMemory>(maybeRequestInShared->pools.back())) {
- return {};
- }
- const auto& requestInShared = *maybeRequestInShared;
-
- // Map the memory.
- const auto& outputMemory = std::get<nn::SharedMemory>(requestInShared.pools.back());
- const auto [pointer, size, context] = NN_TRY(map(outputMemory));
- const uint8_t* constantPointer =
- std::visit([](const auto& o) { return static_cast<const uint8_t*>(o); }, pointer);
-
- // Flush each output pointer.
- CHECK_EQ(request.outputs.size(), requestInShared.outputs.size());
- for (size_t i = 0; i < request.outputs.size(); ++i) {
- const auto& location = request.outputs[i].location;
- const auto& locationInShared = requestInShared.outputs[i].location;
- if (!std::holds_alternative<void*>(location.pointer)) {
- continue;
- }
-
- // Get output pointer and size.
- void* data = std::get<void*>(location.pointer);
- CHECK(data != nullptr);
- const size_t length = location.length;
-
- // Get output pool location.
- CHECK(requestInShared.outputs[i].lifetime == nn::Request::Argument::LifeTime::POOL);
- const size_t index = locationInShared.poolIndex;
- const size_t offset = locationInShared.offset;
- const size_t outputPoolIndex = requestInShared.pools.size() - 1;
- CHECK(locationInShared.length == length);
- CHECK(index == outputPoolIndex);
-
- // Flush memory.
- std::memcpy(data, constantPointer + offset, length);
- }
-
- return {};
-}
-
nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
size_t numberOfOperands, const std::vector<nn::Operation>& operations) {
return makeGeneralFailure(nn::countNumberOfConsumers(numberOfOperands, operations));
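
The switch to append(length, alignment, padding) means each argument's pool offset is computed up front, before any data is copied, which is what allows the RelocationInfo entries to be recorded here. The arithmetic is the usual round-up; a worked sketch (the 64-byte figures are assumptions for illustration, the real defaults being nn::kDefaultRequestMemoryAlignment and nn::kDefaultRequestMemoryPadding):

    // Round-up arithmetic behind append(length, alignment, padding) (sketch).
    constexpr size_t roundUp(size_t value, size_t multiple) {
        return (value + multiple - 1) / multiple * multiple;
    }
    // With 64-byte alignment and padding, a 100-byte input placed after a
    // 256-byte one gets offset roundUp(256, 64) = 256, and the pool grows
    // to roundUp(256 + 100, 64) = 384 bytes.
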
diff --git a/neuralnetworks/utils/common/src/InvalidBurst.cpp b/neuralnetworks/utils/common/src/InvalidBurst.cpp
index 0c34f05..0191533 100644
--- a/neuralnetworks/utils/common/src/InvalidBurst.cpp
+++ b/neuralnetworks/utils/common/src/InvalidBurst.cpp
@@ -38,4 +38,10 @@
return NN_ERROR() << "InvalidBurst";
}
+nn::GeneralResult<nn::SharedExecution> InvalidBurst::createReusableExecution(
+ const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ return NN_ERROR() << "InvalidBurst";
+}
+
} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidExecution.cpp b/neuralnetworks/utils/common/src/InvalidExecution.cpp
new file mode 100644
index 0000000..c4edd25
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidExecution.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidExecution.h"
+
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> InvalidExecution::compute(
+ const nn::OptionalTimePoint& /*deadline*/) const {
+ return NN_ERROR() << "InvalidExecution";
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+InvalidExecution::computeFenced(const std::vector<nn::SyncFence>& /*waitFor*/,
+ const nn::OptionalTimePoint& /*deadline*/,
+ const nn::OptionalDuration& /*timeoutDurationAfterFence*/) const {
+ return NN_ERROR() << "InvalidExecution";
+}
+
+} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
index 9081e1f..8195462 100644
--- a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -42,6 +42,12 @@
return NN_ERROR() << "InvalidPreparedModel";
}
+nn::GeneralResult<nn::SharedExecution> InvalidPreparedModel::createReusableExecution(
+ const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+ const nn::OptionalDuration& /*loopTimeoutDuration*/) const {
+ return NN_ERROR() << "InvalidPreparedModel";
+}
+
nn::GeneralResult<nn::SharedBurst> InvalidPreparedModel::configureExecutionBurst() const {
return NN_ERROR() << "InvalidPreparedModel";
}
diff --git a/neuralnetworks/utils/common/src/ResilientBurst.cpp b/neuralnetworks/utils/common/src/ResilientBurst.cpp
index 38ccc62..79cbe39 100644
--- a/neuralnetworks/utils/common/src/ResilientBurst.cpp
+++ b/neuralnetworks/utils/common/src/ResilientBurst.cpp
@@ -19,6 +19,7 @@
#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
#include <nnapi/IBurst.h>
+#include <nnapi/IPreparedModel.h>
#include <nnapi/Result.h>
#include <nnapi/TypeUtils.h>
#include <nnapi/Types.h>
@@ -29,6 +30,9 @@
#include <optional>
#include <utility>
+#include "InvalidExecution.h"
+#include "ResilientExecution.h"
+
namespace android::hardware::neuralnetworks::utils {
namespace {
@@ -46,11 +50,11 @@
// Attempt recovery and return if it fails.
auto maybeBurst = resilientBurst.recover(burst.get());
if (!maybeBurst.has_value()) {
- auto [resultErrorMessage, resultErrorCode, resultOutputShapes] = std::move(result).error();
- const auto& [recoveryErrorMessage, recoveryErrorCode] = maybeBurst.error();
- return nn::error(resultErrorCode, std::move(resultOutputShapes))
- << resultErrorMessage << ", and failed to recover dead burst object with error "
- << recoveryErrorCode << ": " << recoveryErrorMessage;
+ const auto& [message, code] = maybeBurst.error();
+ std::ostringstream oss;
+ oss << ", and failed to recover dead burst object with error " << code << ": " << message;
+ result.error().message += oss.str();
+ return result;
}
burst = std::move(maybeBurst).value();
@@ -109,4 +113,35 @@
return protect(*this, fn);
}
+nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+#if 0
+ auto self = shared_from_this();
+ ResilientExecution::Factory makeExecution =
+ [burst = std::move(self), request, measure, loopTimeoutDuration] {
+ return burst->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+ };
+ return ResilientExecution::create(std::move(makeExecution));
+#else
+ return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+#endif
+}
+
+nn::GeneralResult<nn::SharedExecution> ResilientBurst::createReusableExecutionInternal(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ if (!isValidInternal()) {
+ return std::make_shared<const InvalidExecution>();
+ }
+ const auto fn = [&request, measure, &loopTimeoutDuration](const nn::IBurst& burst) {
+ return burst.createReusableExecution(request, measure, loopTimeoutDuration);
+ };
+ return protect(*this, fn);
+}
+
+bool ResilientBurst::isValidInternal() const {
+ return true;
+}
+
} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientExecution.cpp b/neuralnetworks/utils/common/src/ResilientExecution.cpp
new file mode 100644
index 0000000..46b404a
--- /dev/null
+++ b/neuralnetworks/utils/common/src/ResilientExecution.cpp
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ResilientExecution.h"
+
+#include "InvalidBurst.h"
+#include "ResilientBurst.h"
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+namespace {
+
+template <typename FnType>
+auto protect(const ResilientExecution& resilientExecution, const FnType& fn)
+ -> decltype(fn(*resilientExecution.getExecution())) {
+ auto execution = resilientExecution.getExecution();
+ auto result = fn(*execution);
+
+ // Immediately return if the execution is not dead.
+ if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
+ return result;
+ }
+
+ // Attempt recovery and return if it fails.
+ auto maybeExecution = resilientExecution.recover(execution.get());
+ if (!maybeExecution.has_value()) {
+ const auto& [message, code] = maybeExecution.error();
+ std::ostringstream oss;
+ oss << ", and failed to recover dead execution with error " << code << ": " << message;
+ result.error().message += oss.str();
+ return result;
+ }
+ execution = std::move(maybeExecution).value();
+
+ return fn(*execution);
+}
+
+} // namespace
+
+nn::GeneralResult<std::shared_ptr<const ResilientExecution>> ResilientExecution::create(
+ Factory makeExecution) {
+ if (makeExecution == nullptr) {
+ return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT)
+ << "utils::ResilientExecution::create must have non-empty makeExecution";
+ }
+ auto execution = NN_TRY(makeExecution());
+ CHECK(execution != nullptr);
+ return std::make_shared<ResilientExecution>(PrivateConstructorTag{}, std::move(makeExecution),
+ std::move(execution));
+}
+
+ResilientExecution::ResilientExecution(PrivateConstructorTag /*tag*/, Factory makeExecution,
+ nn::SharedExecution execution)
+ : kMakeExecution(std::move(makeExecution)), mExecution(std::move(execution)) {
+ CHECK(kMakeExecution != nullptr);
+ CHECK(mExecution != nullptr);
+}
+
+nn::SharedExecution ResilientExecution::getExecution() const {
+ std::lock_guard guard(mMutex);
+ return mExecution;
+}
+
+nn::GeneralResult<nn::SharedExecution> ResilientExecution::recover(
+ const nn::IExecution* failingExecution) const {
+ std::lock_guard guard(mMutex);
+
+ // Another caller updated the failing execution.
+ if (mExecution.get() != failingExecution) {
+ return mExecution;
+ }
+
+ mExecution = NN_TRY(kMakeExecution());
+ return mExecution;
+}
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+ResilientExecution::compute(const nn::OptionalTimePoint& deadline) const {
+ const auto fn = [&deadline](const nn::IExecution& execution) {
+ return execution.compute(deadline);
+ };
+ return protect(*this, fn);
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+ResilientExecution::computeFenced(const std::vector<nn::SyncFence>& waitFor,
+ const nn::OptionalTimePoint& deadline,
+ const nn::OptionalDuration& timeoutDurationAfterFence) const {
+ const auto fn = [&waitFor, &deadline,
+ &timeoutDurationAfterFence](const nn::IExecution& execution) {
+ return execution.computeFenced(waitFor, deadline, timeoutDurationAfterFence);
+ };
+ return protect(*this, fn);
+}
+
+bool ResilientExecution::isValidInternal() const {
+ return true;
+}
+
+} // namespace android::hardware::neuralnetworks::utils
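
The protect() helper in this new file repeats the retry-once idiom used by the other Resilient* wrappers: only DEAD_OBJECT triggers recovery, and the call is retried exactly once against the recreated object. Its shape, distilled (a sketch; Wrapper stands for any of the Resilient* classes):

    // Generic shape of the Resilient* retry idiom (Wrapper is a placeholder).
    template <typename Wrapper, typename Fn>
    auto retryOnceOnDeadObject(const Wrapper& wrapper, const Fn& fn)
            -> decltype(fn(*wrapper.getExecution())) {
        auto object = wrapper.getExecution();
        auto result = fn(*object);
        if (result.has_value() || result.error().code != nn::ErrorStatus::DEAD_OBJECT) {
            return result;  // success, or an error that recovery cannot fix
        }
        auto recovered = wrapper.recover(object.get());
        if (!recovered.has_value()) {
            return result;  // surface the original DEAD_OBJECT error
        }
        return fn(**recovered);  // one retry against the fresh object
    }
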
diff --git a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
index 5dd5f99..1ae19bc 100644
--- a/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
+++ b/neuralnetworks/utils/common/src/ResilientPreparedModel.cpp
@@ -17,7 +17,9 @@
#include "ResilientPreparedModel.h"
#include "InvalidBurst.h"
+#include "InvalidExecution.h"
#include "ResilientBurst.h"
+#include "ResilientExecution.h"
#include <android-base/logging.h>
#include <android-base/thread_annotations.h>
@@ -127,6 +129,21 @@
return protect(*this, fn);
}
+nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecution(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+#if 0
+ auto self = shared_from_this();
+ ResilientExecution::Factory makeExecution =
+ [preparedModel = std::move(self), request, measure, loopTimeoutDuration] {
+ return preparedModel->createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+ };
+ return ResilientExecution::create(std::move(makeExecution));
+#else
+ return createReusableExecutionInternal(request, measure, loopTimeoutDuration);
+#endif
+}
+
nn::GeneralResult<nn::SharedBurst> ResilientPreparedModel::configureExecutionBurst() const {
#if 0
auto self = shared_from_this();
@@ -140,6 +157,19 @@
#endif
}
+nn::GeneralResult<nn::SharedExecution> ResilientPreparedModel::createReusableExecutionInternal(
+ const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration) const {
+ if (!isValidInternal()) {
+ return std::make_shared<const InvalidExecution>();
+ }
+ const auto fn = [&request, measure,
+ &loopTimeoutDuration](const nn::IPreparedModel& preparedModel) {
+ return preparedModel.createReusableExecution(request, measure, loopTimeoutDuration);
+ };
+ return protect(*this, fn);
+}
+
std::any ResilientPreparedModel::getUnderlyingResource() const {
return getPreparedModel()->getUnderlyingResource();
}
diff --git a/neuralnetworks/utils/common/test/MockExecution.h b/neuralnetworks/utils/common/test/MockExecution.h
new file mode 100644
index 0000000..91e3428
--- /dev/null
+++ b/neuralnetworks/utils/common/test/MockExecution.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <nnapi/IExecution.h>
+
+namespace android::nn {
+
+class MockExecution final : public IExecution {
+ public:
+ MOCK_METHOD((ExecutionResult<std::pair<std::vector<OutputShape>, Timing>>), compute,
+ (const OptionalTimePoint& deadline), (const, override));
+ MOCK_METHOD((GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>>), computeFenced,
+ (const std::vector<SyncFence>& waitFor, const OptionalTimePoint& deadline,
+ const OptionalDuration& timeoutDurationAfterFence),
+ (const, override));
+};
+
+} // namespace android::nn
+
+#endif // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_TEST_MOCK_EXECUTION
diff --git a/neuralnetworks/utils/common/test/MockPreparedModel.h b/neuralnetworks/utils/common/test/MockPreparedModel.h
index c004861..c8ce006 100644
--- a/neuralnetworks/utils/common/test/MockPreparedModel.h
+++ b/neuralnetworks/utils/common/test/MockPreparedModel.h
@@ -35,6 +35,10 @@
const OptionalDuration& loopTimeoutDuration,
const OptionalDuration& timeoutDurationAfterFence),
(const, override));
+ MOCK_METHOD((GeneralResult<SharedExecution>), createReusableExecution,
+ (const nn::Request& request, nn::MeasureTiming measure,
+ const nn::OptionalDuration& loopTimeoutDuration),
+ (const, override));
MOCK_METHOD(GeneralResult<SharedBurst>, configureExecutionBurst, (), (const, override));
MOCK_METHOD(std::any, getUnderlyingResource, (), (const, override));
};
diff --git a/neuralnetworks/utils/common/test/ResilientExecution.cpp b/neuralnetworks/utils/common/test/ResilientExecution.cpp
new file mode 100644
index 0000000..c0737fb
--- /dev/null
+++ b/neuralnetworks/utils/common/test/ResilientExecution.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gmock/gmock.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/hal/ResilientExecution.h>
+#include <utility>
+#include "MockExecution.h"
+
+namespace android::hardware::neuralnetworks::utils {
+namespace {
+
+using ::testing::_;
+using ::testing::InvokeWithoutArgs;
+using ::testing::Return;
+
+using SharedMockExecution = std::shared_ptr<const nn::MockExecution>;
+using MockExecutionFactory = ::testing::MockFunction<nn::GeneralResult<nn::SharedExecution>()>;
+
+SharedMockExecution createMockExecution() {
+ return std::make_shared<const nn::MockExecution>();
+}
+
+std::tuple<SharedMockExecution, std::unique_ptr<MockExecutionFactory>,
+ std::shared_ptr<const ResilientExecution>>
+setup() {
+ auto mockExecution = std::make_shared<const nn::MockExecution>();
+
+ auto mockExecutionFactory = std::make_unique<MockExecutionFactory>();
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(mockExecution));
+
+ auto execution = ResilientExecution::create(mockExecutionFactory->AsStdFunction()).value();
+ return std::make_tuple(std::move(mockExecution), std::move(mockExecutionFactory),
+ std::move(execution));
+}
+
+constexpr auto makeError = [](nn::ErrorStatus status) {
+ return [status](const auto&... /*args*/) { return nn::error(status); };
+};
+const auto kReturnGeneralFailure = makeError(nn::ErrorStatus::GENERAL_FAILURE);
+const auto kReturnDeadObject = makeError(nn::ErrorStatus::DEAD_OBJECT);
+
+const auto kNoExecutionError =
+ nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>{};
+const auto kNoFencedExecutionError =
+ nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>(
+ std::make_pair(nn::SyncFence::createAsSignaled(), nullptr));
+
+} // namespace
+
+TEST(ResilientExecutionTest, invalidExecutionFactory) {
+ // setup call
+ const auto invalidExecutionFactory = ResilientExecution::Factory{};
+
+ // run test
+ const auto result = ResilientExecution::create(invalidExecutionFactory);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::INVALID_ARGUMENT);
+}
+
+TEST(ResilientExecutionTest, executionFactoryFailure) {
+ // setup call
+ const auto invalidExecutionFactory = kReturnGeneralFailure;
+
+ // run test
+ const auto result = ResilientExecution::create(invalidExecutionFactory);
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ResilientExecutionTest, getExecution) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+
+ // run test
+ const auto result = execution->getExecution();
+
+ // verify result
+ EXPECT_TRUE(result == mockExecution);
+}
+
+TEST(ResilientExecutionTest, compute) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(Return(kNoExecutionError));
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, computeError) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ResilientExecutionTest, computeDeadObjectFailedRecovery) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnDeadObject);
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(ResilientExecutionTest, computeDeadObjectSuccessfulRecovery) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, compute(_)).Times(1).WillOnce(kReturnDeadObject);
+ const auto recoveredMockExecution = createMockExecution();
+ EXPECT_CALL(*recoveredMockExecution, compute(_)).Times(1).WillOnce(Return(kNoExecutionError));
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+
+ // run test
+ const auto result = execution->compute({});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, computeFenced) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, computeFenced(_, _, _))
+ .Times(1)
+ .WillOnce(Return(kNoFencedExecutionError));
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, computeFencedError) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
+TEST(ResilientExecutionTest, computeFencedDeadObjectFailedRecovery) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnDeadObject);
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::DEAD_OBJECT);
+}
+
+TEST(ResilientExecutionTest, computeFencedDeadObjectSuccessfulRecovery) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ EXPECT_CALL(*mockExecution, computeFenced(_, _, _)).Times(1).WillOnce(kReturnDeadObject);
+ const auto recoveredMockExecution = createMockExecution();
+ EXPECT_CALL(*recoveredMockExecution, computeFenced(_, _, _))
+ .Times(1)
+ .WillOnce(Return(kNoFencedExecutionError));
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+
+ // run test
+ const auto result = execution->computeFenced({}, {}, {});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientExecutionTest, recover) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ const auto recoveredMockExecution = createMockExecution();
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+
+ // run test
+ const auto result = execution->recover(mockExecution.get());
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+ EXPECT_TRUE(result.value() == recoveredMockExecution);
+}
+
+TEST(ResilientExecutionTest, recoverFailure) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ const auto recoveredMockExecution = createMockExecution();
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = execution->recover(mockExecution.get());
+
+ // verify result
+ EXPECT_FALSE(result.has_value());
+}
+
+TEST(ResilientExecutionTest, someoneElseRecovered) {
+ // setup call
+ const auto [mockExecution, mockExecutionFactory, execution] = setup();
+ const auto recoveredMockExecution = createMockExecution();
+ EXPECT_CALL(*mockExecutionFactory, Call()).Times(1).WillOnce(Return(recoveredMockExecution));
+ execution->recover(mockExecution.get());
+
+ // run test
+ const auto result = execution->recover(mockExecution.get());
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+ EXPECT_TRUE(result.value() == recoveredMockExecution);
+}
+
+} // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
index 6d86e10..d396ca8 100644
--- a/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
+++ b/neuralnetworks/utils/common/test/ResilientPreparedModelTest.cpp
@@ -55,6 +55,7 @@
const auto kReturnGeneralFailure = makeError(nn::ErrorStatus::GENERAL_FAILURE);
const auto kReturnDeadObject = makeError(nn::ErrorStatus::DEAD_OBJECT);
+const auto kNoCreateReusableExecutionError = nn::GeneralResult<nn::SharedExecution>{};
const auto kNoExecutionError =
nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>{};
const auto kNoFencedExecutionError =
@@ -231,6 +232,36 @@
<< "Failed with " << result.error().code << ": " << result.error().message;
}
+TEST(ResilientPreparedModelTest, createReusableExecution) {
+ // setup call
+ const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+ .Times(1)
+ .WillOnce(Return(kNoCreateReusableExecutionError));
+
+ // run test
+ const auto result = preparedModel->createReusableExecution({}, {}, {});
+
+ // verify result
+ ASSERT_TRUE(result.has_value())
+ << "Failed with " << result.error().code << ": " << result.error().message;
+}
+
+TEST(ResilientPreparedModelTest, createReusableExecutionError) {
+ // setup call
+ const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
+ EXPECT_CALL(*mockPreparedModel, createReusableExecution(_, _, _))
+ .Times(1)
+ .WillOnce(kReturnGeneralFailure);
+
+ // run test
+ const auto result = preparedModel->createReusableExecution({}, {}, {});
+
+ // verify result
+ ASSERT_FALSE(result.has_value());
+ EXPECT_EQ(result.error().code, nn::ErrorStatus::GENERAL_FAILURE);
+}
+
TEST(ResilientPreparedModelTest, getUnderlyingResource) {
// setup call
const auto [mockPreparedModel, mockPreparedModelFactory, preparedModel] = setup();
diff --git a/power/stats/aidl/android/hardware/power/stats/EnergyConsumerResult.aidl b/power/stats/aidl/android/hardware/power/stats/EnergyConsumerResult.aidl
index 12d2042..66c8c8c 100644
--- a/power/stats/aidl/android/hardware/power/stats/EnergyConsumerResult.aidl
+++ b/power/stats/aidl/android/hardware/power/stats/EnergyConsumerResult.aidl
@@ -25,7 +25,7 @@
*/
int id;
/**
- * Time since boot in milliseconds
+ * Time of data capture in milliseconds since boot (CLOCK_BOOTTIME clock)
*/
long timestampMs;
/**
@@ -38,4 +38,3 @@
*/
EnergyConsumerAttribution[] attribution;
}
-
diff --git a/power/stats/aidl/android/hardware/power/stats/EnergyMeasurement.aidl b/power/stats/aidl/android/hardware/power/stats/EnergyMeasurement.aidl
index d3e8f46..31fbaa8 100644
--- a/power/stats/aidl/android/hardware/power/stats/EnergyMeasurement.aidl
+++ b/power/stats/aidl/android/hardware/power/stats/EnergyMeasurement.aidl
@@ -23,7 +23,7 @@
*/
int id;
/**
- * Approximate time of data capture in millseconds since boot
+ * Time of data capture in milliseconds since boot (CLOCK_BOOTTIME clock)
*/
long timestampMs;
/**
@@ -35,4 +35,3 @@
*/
long energyUWs;
}
-
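
Both power.stats structs now document the same clock. The distinction matters: CLOCK_BOOTTIME keeps advancing while the device is suspended, unlike CLOCK_MONOTONIC, and suspend is precisely when energy accounting is interesting. A minimal sketch of how an implementation might fill timestampMs, using the standard Linux clock_gettime API (not code from this patch):

    #include <stdint.h>
    #include <time.h>

    // Milliseconds since boot on CLOCK_BOOTTIME, matching the documented
    // semantics of EnergyMeasurement::timestampMs and
    // EnergyConsumerResult::timestampMs.
    static int64_t boottimeMs() {
        struct timespec ts;
        clock_gettime(CLOCK_BOOTTIME, &ts);
        return static_cast<int64_t>(ts.tv_sec) * 1000 + ts.tv_nsec / 1000000;
    }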
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
index 7c05984..27e641a 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
+++ b/radio/1.6/vts/functional/radio_hidl_hal_api.cpp
@@ -498,6 +498,8 @@
::android::hardware::radio::V1_6::RadioError::NONE,
::android::hardware::radio::V1_6::RadioError::INVALID_ARGUMENTS}));
}
+
+ sleep(1);
}
/*
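
The bare sleep(1) gives the modem time to settle before the next request; without it, back-to-back VTS cases can reach the radio HAL while the previous request is still in flight. If this pattern spreads, a named helper would keep the intent visible (a sketch, not part of the patch):

    #include <chrono>
    #include <thread>

    // Hypothetical helper: pace serialized radio VTS requests instead of
    // scattering bare sleep(1) calls through the tests.
    static void waitForModemToSettle() {
        std::this_thread::sleep_for(std::chrono::seconds(1));
    }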
diff --git a/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp b/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp
index 4222441..8c99d92 100644
--- a/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp
+++ b/radio/1.6/vts/functional/radio_hidl_hal_misc.cpp
@@ -45,10 +45,8 @@
::android::hardware::radio::V1_0::RadioError::NONE) {
static const std::regex kOperatorNumericRe("^[0-9]{5,6}$");
for (OperatorInfo info : radioRsp_v1_6->networkInfos) {
- if (info.operatorNumeric != nullptr) {
- ASSERT_TRUE(
- std::regex_match(std::string(info.operatorNumeric), kOperatorNumericRe));
- }
+ ASSERT_TRUE(
+ std::regex_match(std::string(info.operatorNumeric), kOperatorNumericRe));
}
}
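
Dropping the nullptr guard is sound: operatorNumeric is a hidl_string, which always holds a (possibly empty) buffer, so the old check never skipped anything, and every returned entry must now match the pattern. For reference, the pattern is the standard MCC+MNC encoding: a 3-digit mobile country code followed by a 2- or 3-digit mobile network code. A self-contained version of the check:

    #include <regex>
    #include <string>

    // MCC (3 digits) + MNC (2 or 3 digits), as enforced by kOperatorNumericRe.
    static bool isValidOperatorNumeric(const std::string& numeric) {
        static const std::regex kRe("^[0-9]{5,6}$");
        return std::regex_match(numeric, kRe);
    }
    // isValidOperatorNumeric("310260") == true, isValidOperatorNumeric("") == false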
diff --git a/security/keymint/aidl/android/hardware/security/keymint/IKeyMintDevice.aidl b/security/keymint/aidl/android/hardware/security/keymint/IKeyMintDevice.aidl
index b6af813..9cc795d 100644
--- a/security/keymint/aidl/android/hardware/security/keymint/IKeyMintDevice.aidl
+++ b/security/keymint/aidl/android/hardware/security/keymint/IKeyMintDevice.aidl
@@ -318,10 +318,11 @@
* @param attestationKey, if provided, specifies the key that must be used to sign the
* attestation certificate. If `keyParams` does not contain a Tag::ATTESTATION_CHALLENGE
* but `attestationKey` is non-null, the IKeyMintDevice must return
- * ErrorCode::INVALID_ARGUMENT. If the provided AttestationKey does not contain a key
- * blob containing an asymmetric key with KeyPurpose::ATTEST_KEY, the IKeyMintDevice must
- * return ErrorCode::INCOMPATIBLE_PURPOSE. If the provided AttestationKey has an empty
- * issuer subject name, the IKeyMintDevice must return ErrorCode::INVALID_ARGUMENT.
+ * ErrorCode::ATTESTATION_CHALLENGE_MISSING. If the provided AttestationKey does not
+ * contain a key blob containing an asymmetric key with KeyPurpose::ATTEST_KEY, the
+ * IKeyMintDevice must return ErrorCode::INCOMPATIBLE_PURPOSE. If the provided
+ * AttestationKey has an empty issuer subject name, the IKeyMintDevice must return
+ * ErrorCode::INVALID_ARGUMENT.
*
* If `attestationKey` is null and `keyParams` contains Tag::ATTESTATION_CHALLENGE but
* the KeyMint implementation does not have factory-provisioned attestation keys, it must
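
Restated, the revised paragraph distinguishes three failure modes when attestationKey is supplied: a missing challenge is now its own error code, a wrong key purpose stays INCOMPATIBLE_PURPOSE, and an empty issuer subject stays INVALID_ARGUMENT. A hedged sketch of the decision order the documentation spells out (the helper and its parameters are hypothetical; only the ErrorCode values come from the AIDL):

    // Sketch of the documented error mapping, not IKeyMintDevice code.
    ErrorCode checkAttestationKeyArgs(bool hasChallenge, bool isAttestKeyPurpose,
                                      bool issuerSubjectEmpty) {
        if (!hasChallenge) return ErrorCode::ATTESTATION_CHALLENGE_MISSING;
        if (!isAttestKeyPurpose) return ErrorCode::INCOMPATIBLE_PURPOSE;
        if (issuerSubjectEmpty) return ErrorCode::INVALID_ARGUMENT;
        return ErrorCode::OK;
    }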
diff --git a/security/keymint/aidl/vts/functional/AttestKeyTest.cpp b/security/keymint/aidl/vts/functional/AttestKeyTest.cpp
index 881354d..e4a877c 100644
--- a/security/keymint/aidl/vts/functional/AttestKeyTest.cpp
+++ b/security/keymint/aidl/vts/functional/AttestKeyTest.cpp
@@ -361,7 +361,7 @@
EXPECT_EQ(ErrorCode::OK,
GenerateKey(AuthorizationSetBuilder()
- .EcdsaSigningKey(224)
+ .EcdsaSigningKey(EcCurve::P_256)
.AttestKey()
.AttestationChallenge("foo")
.AttestationApplicationId("bar")
@@ -435,7 +435,7 @@
if ((i & 0x1) == 1) {
EXPECT_EQ(ErrorCode::OK,
GenerateKey(AuthorizationSetBuilder()
- .EcdsaSigningKey(224)
+ .EcdsaSigningKey(EcCurve::P_256)
.AttestKey()
.AttestationChallenge("foo")
.AttestationApplicationId("bar")
@@ -513,7 +513,7 @@
vector<uint8_t> attested_key_blob;
vector<KeyCharacteristics> attested_key_characteristics;
vector<Certificate> attested_key_cert_chain;
- EXPECT_EQ(ErrorCode::INVALID_ARGUMENT,
+ EXPECT_EQ(ErrorCode::ATTESTATION_CHALLENGE_MISSING,
GenerateKey(AuthorizationSetBuilder()
.RsaSigningKey(2048, 65537)
.Authorization(TAG_NO_AUTH_REQUIRED)
@@ -522,7 +522,7 @@
attest_key, &attested_key_blob, &attested_key_characteristics,
&attested_key_cert_chain));
- EXPECT_EQ(ErrorCode::INVALID_ARGUMENT,
+ EXPECT_EQ(ErrorCode::ATTESTATION_CHALLENGE_MISSING,
GenerateKey(AuthorizationSetBuilder()
.EcdsaSigningKey(EcCurve::P_256)
.Authorization(TAG_NO_AUTH_REQUIRED)
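
The EcdsaSigningKey(224) to EcdsaSigningKey(EcCurve::P_256) switch tracks KeyMint's move from size-based to curve-based EC key specification; P-224 is not a curve KeyMint implementations are required to support. Judging by the two call sites, the VTS builder exposes both spellings; a hedged sketch of the pair of declarations:

    // Sketch of the two builder spellings seen in this test; the real
    // AuthorizationSetBuilder lives in the KeyMint VTS support library.
    AuthorizationSetBuilder& EcdsaSigningKey(uint32_t key_size);  // legacy, size-based
    AuthorizationSetBuilder& EcdsaSigningKey(EcCurve curve);      // curve-based form used above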
diff --git a/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.cpp b/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.cpp
index 4789204..675e01d 100644
--- a/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.cpp
+++ b/security/keymint/aidl/vts/functional/KeyMintAidlTestBase.cpp
@@ -119,10 +119,10 @@
// Attestations don't contain everything in key authorization lists, so we need to filter the key
// lists to produce the lists that we expect to match the attestations.
auto kTagsToFilter = {
- Tag::CREATION_DATETIME, //
- Tag::EC_CURVE,
- Tag::HARDWARE_TYPE,
- Tag::INCLUDE_UNIQUE_ID,
+ Tag::CREATION_DATETIME,
+ Tag::EC_CURVE,
+ Tag::HARDWARE_TYPE,
+ Tag::INCLUDE_UNIQUE_ID,
};
AuthorizationSet filtered_tags(const AuthorizationSet& set) {
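
The kTagsToFilter edit is purely cosmetic: it drops the trailing // that had pinned clang-format to one entry per line. For context, filtered_tags() below it copies every parameter whose tag is not in that list; a hedged sketch of that shape, assuming AuthorizationSet iterates over KeyParameter and supports push_back:

    #include <algorithm>
    #include <iterator>

    AuthorizationSet filtered_tags(const AuthorizationSet& set) {
        AuthorizationSet filtered;
        std::remove_copy_if(set.begin(), set.end(), std::back_inserter(filtered),
                            [](const KeyParameter& param) {
                                return std::find(kTagsToFilter.begin(),
                                                 kTagsToFilter.end(),
                                                 param.tag) != kTagsToFilter.end();
                            });
        return filtered;
    }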
diff --git a/security/keymint/aidl/vts/functional/KeyMintTest.cpp b/security/keymint/aidl/vts/functional/KeyMintTest.cpp
index cd7d603..22d26a5 100644
--- a/security/keymint/aidl/vts/functional/KeyMintTest.cpp
+++ b/security/keymint/aidl/vts/functional/KeyMintTest.cpp
@@ -2261,11 +2261,11 @@
.Padding(PaddingMode::NONE)
.Padding(PaddingMode::RSA_PKCS1_1_5_SIGN)));
- ASSERT_EQ(ErrorCode::UNSUPPORTED_DIGEST,
- Begin(KeyPurpose::SIGN, AuthorizationSetBuilder()
- .Digest(Digest::NONE)
- .Digest(Digest::SHA1)
- .Padding(PaddingMode::RSA_PKCS1_1_5_SIGN)));
+ auto result = Begin(KeyPurpose::SIGN, AuthorizationSetBuilder()
+ .Digest(Digest::NONE)
+ .Digest(Digest::SHA1)
+ .Padding(PaddingMode::RSA_PKCS1_1_5_SIGN));
+ ASSERT_TRUE(result == ErrorCode::UNSUPPORTED_DIGEST || result == ErrorCode::INVALID_ARGUMENT);
ASSERT_EQ(ErrorCode::UNSUPPORTED_DIGEST,
Begin(KeyPurpose::SIGN,
@@ -3329,7 +3329,7 @@
*/
TEST_P(ImportKeyTest, TripleDesFailure) {
string key = hex2str("a49d7564199e97cb529d2c9d97bf2f98d35edf57ba1f7358");
- uint32_t bitlen = key.size() * 8;
+ uint32_t bitlen = key.size() * 7;
for (uint32_t key_size : {bitlen - 1, bitlen + 1, bitlen - 8, bitlen + 8}) {
// Explicit key size doesn't match that of the provided key.
auto result = ImportKey(AuthorizationSetBuilder()
@@ -3343,19 +3343,19 @@
<< "unexpected result: " << result;
}
// Explicit key size matches that of the provided key, but it's not a valid size.
- string long_key = hex2str("a49d7564199e97cb529d2c9d97bf2f98d35edf57ba1f7358");
+ string long_key = hex2str("a49d7564199e97cb529d2c9d97bf2f98d35edf57ba1f735800");
ASSERT_EQ(ErrorCode::UNSUPPORTED_KEY_SIZE,
ImportKey(AuthorizationSetBuilder()
.Authorization(TAG_NO_AUTH_REQUIRED)
- .TripleDesEncryptionKey(long_key.size() * 8)
+ .TripleDesEncryptionKey(long_key.size() * 7)
.EcbMode()
.Padding(PaddingMode::PKCS7),
KeyFormat::RAW, long_key));
- string short_key = hex2str("a49d7564199e97cb529d2c9d97bf2f98d35edf57ba1f7358");
+ string short_key = hex2str("a49d7564199e97cb529d2c9d97bf2f98d35edf57ba1f73");
ASSERT_EQ(ErrorCode::UNSUPPORTED_KEY_SIZE,
ImportKey(AuthorizationSetBuilder()
.Authorization(TAG_NO_AUTH_REQUIRED)
- .TripleDesEncryptionKey(short_key.size() * 8)
+ .TripleDesEncryptionKey(short_key.size() * 7)
.EcbMode()
.Padding(PaddingMode::PKCS7),
KeyFormat::RAW, short_key));
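
The * 8 to * 7 change is the substantive fix: KeyMint sizes 3DES keys by effective key bits, not raw bits, because each byte of a DES key reserves its low bit for parity. A 24-byte DES-EDE key is therefore 168 bits, and the old bitlen of 192 made the mismatched-size loop probe values KeyMint never treats as canonical. The longer and shorter hex strings exist for the same reason: with sizes derived from the actual blob length, the test needs 25- and 23-byte keys to hit the matches-but-invalid case. In code form:

    // Effective 3DES key size as KeyMint counts it: 7 key bits per byte,
    // the 8th bit of each byte being DES parity.
    constexpr uint32_t effective3DesBits(size_t keyBytes) {
        return static_cast<uint32_t>(keyBytes) * 7;
    }
    static_assert(effective3DesBits(24) == 168);  // the only valid 3DES size
    static_assert(effective3DesBits(25) == 175);  // long_key: matches the blob, still rejected
    static_assert(effective3DesBits(23) == 161);  // short_key: matches the blob, still rejected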
diff --git a/sensors/1.0/ISensors.hal b/sensors/1.0/ISensors.hal
index 8d41de2..0e172ef 100644
--- a/sensors/1.0/ISensors.hal
+++ b/sensors/1.0/ISensors.hal
@@ -103,7 +103,7 @@
* Flush adds a FLUSH_COMPLETE metadata event to the end of the "batch mode"
* FIFO for the specified sensor and flushes the FIFO. If the FIFO is empty
* or if the sensor doesn't support batching (FIFO size zero), return
- * SUCCESS and add a trivial FLUSH_COMPLETE event added to the event stream.
+ * OK and add a trivial FLUSH_COMPLETE event to the event stream.
* This applies to all sensors other than one-shot sensors. If the sensor
* is a one-shot sensor, flush must return BAD_VALUE and not generate any
* flush complete metadata. If the sensor is not active at the time flush()
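
Result::OK is the actual enumerator in the 1.0 types, hence the rename from SUCCESS. Beyond that, the paragraph encodes a small state machine; a hedged sketch of the dispatch it describes, where lookup, isOneShot, isActive, drainBatchFifo, and enqueueFlushCompleteEvent are all hypothetical helpers:

    // Sketch of the documented flush() contract; not driver code.
    Result flush(int32_t sensorHandle) {
        const SensorInfo& sensor = lookup(sensorHandle);
        if (isOneShot(sensor)) return Result::BAD_VALUE;  // no flush-complete allowed
        if (!isActive(sensor)) return Result::BAD_VALUE;  // inactive sensors also fail
        drainBatchFifo(sensor);             // may be a no-op (FIFO size zero)
        enqueueFlushCompleteEvent(sensor);  // the trivial event is still required
        return Result::OK;
    }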
diff --git a/sensors/1.0/types.hal b/sensors/1.0/types.hal
index cbbe92f..ac7be06 100644
--- a/sensors/1.0/types.hal
+++ b/sensors/1.0/types.hal
@@ -1130,7 +1130,7 @@
/**
* High performance mode hint. Device is able to use up more power and take
- * more reources to improve throughput and latency in high performance mode.
+ * more resources to improve throughput and latency in high performance mode.
* One possible use case is virtual reality, when sensor latency need to be
* carefully controlled.
* int32_t: 1 or 0, denote if device is in/out of high performance mode,
diff --git a/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp b/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp
index 56bc9cf..1f579ba 100644
--- a/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp
+++ b/sensors/1.0/vts/functional/VtsHalSensorsV1_0TargetTest.cpp
@@ -190,7 +190,7 @@
});
}
-// Test if sensor list returned is valid
+// Test if sensor HAL can switch to different operation modes
TEST_P(SensorsHidlTest, SetOperationMode) {
std::vector<SensorInfo> sensorList = getSensorsList();
@@ -208,7 +208,7 @@
ASSERT_EQ(Result::OK, S()->setOperationMode(OperationMode::NORMAL));
}
-// Test if sensor list returned is valid
+// Test if sensor HAL can receive injected events in loopback mode
TEST_P(SensorsHidlTest, InjectSensorEventData) {
std::vector<SensorInfo> sensorList = getSensorsList();
std::vector<SensorInfo> sensorSupportInjection;
diff --git a/tetheroffload/control/1.1/ITetheringOffloadCallback.hal b/tetheroffload/control/1.1/ITetheringOffloadCallback.hal
index 7a7d56d..9c74641 100644
--- a/tetheroffload/control/1.1/ITetheringOffloadCallback.hal
+++ b/tetheroffload/control/1.1/ITetheringOffloadCallback.hal
@@ -26,8 +26,8 @@
interface ITetheringOffloadCallback extends @1.0::ITetheringOffloadCallback {
/**
* Called when an asynchronous event is generated by the hardware
- * management process. Events which are common for 1.0 and 1.1 HAL
- * MUST be fired on both 1.0 and 1.1 callback.
+ * management process. Implementations that report events via this callback
+ * should not invoke onEvent of the 1.0 HAL.
*/
oneway onEvent_1_1(OffloadCallbackEvent event);
};
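
This inverts the 1.0/1.1 event contract: previously, events common to both versions had to be fired on both callbacks, which made 1.1-aware clients see duplicates; now each event is delivered exactly once, on the newest callback generation the client supports. A hedged sketch of the dispatch an offload management process might use (castFrom and withDefault are standard HIDL utilities; the surrounding names are hypothetical):

    // Deliver each event once, preferring the 1.1 callback when available.
    void notifyEvent(const sp<V1_0::ITetheringOffloadCallback>& cb,
                     V1_1::OffloadCallbackEvent event) {
        sp<V1_1::ITetheringOffloadCallback> cb11 =
                V1_1::ITetheringOffloadCallback::castFrom(cb).withDefault(nullptr);
        if (cb11 != nullptr) {
            cb11->onEvent_1_1(event);  // 1.1 client: single delivery
        } else {
            // Legacy client: only meaningful for events that exist in 1.0.
            cb->onEvent(static_cast<V1_0::OffloadCallbackEvent>(event));
        }
    }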
diff --git a/tv/tuner/1.0/vts/functional/DescramblerTests.cpp b/tv/tuner/1.0/vts/functional/DescramblerTests.cpp
index 2e27475..67f6bae 100644
--- a/tv/tuner/1.0/vts/functional/DescramblerTests.cpp
+++ b/tv/tuner/1.0/vts/functional/DescramblerTests.cpp
@@ -53,12 +53,15 @@
return failure();
}
- auto status = mCas->setSessionPrivateData(sessionId, hidlPvtData);
- if (status != android::hardware::cas::V1_0::Status::OK) {
- ALOGW("[vts] Failed to set session private data");
- mCas->closeSession(sessionId);
- return failure();
+ if (hidlPvtData.size() > 0) {
+ auto status = mCas->setSessionPrivateData(sessionId, hidlPvtData);
+ if (status != android::hardware::cas::V1_0::Status::OK) {
+ ALOGW("[vts] Failed to set session private data");
+ mCas->closeSession(sessionId);
+ return failure();
+ }
}
+
return success();
}
diff --git a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp
index 62093cc..b39abe3 100644
--- a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp
+++ b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TargetTest.cpp
@@ -267,7 +267,9 @@
uint32_t demuxId;
sp<IDemux> demux;
ASSERT_TRUE(mDemuxTests.openDemux(demux, demuxId));
+ mDvrTests.setDemux(demux);
+ DvrConfig dvrSourceConfig;
if (record.hasFrontendConnection) {
uint32_t feId;
mFrontendTests.getFrontendIdByType(frontendConf.type, feId);
@@ -275,13 +277,17 @@
ASSERT_TRUE(mFrontendTests.openFrontendById(feId));
ASSERT_TRUE(mFrontendTests.setFrontendCallback());
ASSERT_TRUE(mDemuxTests.setDemuxFrontendDataSource(feId));
+ } else {
+ dvrSourceConfig = dvrMap[record.dvrSourceId];
+ ASSERT_TRUE(mDvrTests.openDvrInDemux(dvrSourceConfig.type, dvrSourceConfig.bufferSize));
+ ASSERT_TRUE(mDvrTests.configDvrPlayback(dvrSourceConfig.settings));
+ ASSERT_TRUE(mDvrTests.getDvrPlaybackMQDescriptor());
}
uint32_t filterId;
sp<IFilter> filter;
mFilterTests.setDemux(demux);
- mDvrTests.setDemux(demux);
ASSERT_TRUE(mDvrTests.openDvrInDemux(dvrConf.type, dvrConf.bufferSize));
ASSERT_TRUE(mDvrTests.configDvrRecord(dvrConf.settings));
ASSERT_TRUE(mDvrTests.getDvrRecordMQDescriptor());
@@ -327,6 +333,7 @@
mFrontendTests.setDemux(demux);
} else {
dvrSourceConfig = dvrMap[descrambling.dvrSourceId];
+ mDvrTests.setDemux(demux);
ASSERT_TRUE(mDvrTests.openDvrInDemux(dvrSourceConfig.type, dvrSourceConfig.bufferSize));
ASSERT_TRUE(mDvrTests.configDvrPlayback(dvrSourceConfig.settings));
ASSERT_TRUE(mDvrTests.getDvrPlaybackMQDescriptor());
@@ -641,7 +648,7 @@
TEST_P(TunerRecordHidlTest, LnbRecordDataFlowWithTsRecordFilterTest) {
description("Feed ts data from Fe with Lnb to recording and test with ts record filter");
- if (lnbRecord.support) {
+ if (!lnbRecord.support) {
return;
}
recordSingleFilterTestWithLnb(filterMap[lnbRecord.recordFilterId],
@@ -651,7 +658,7 @@
TEST_P(TunerDescramblerHidlTest, CreateDescrambler) {
description("Create Descrambler");
- if (descrambling.support) {
+ if (!descrambling.support) {
return;
}
uint32_t demuxId;
@@ -678,7 +685,7 @@
TEST_P(TunerDescramblerHidlTest, ScrambledBroadcastDataFlowMediaFiltersTest) {
description("Test ts audio filter in scrambled broadcast use case");
- if (descrambling.support) {
+ if (!descrambling.support) {
return;
}
set<FilterConfig> filterConfs;
diff --git a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h
index 735bc82..8328e0c 100644
--- a/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h
+++ b/tv/tuner/1.0/vts/functional/VtsHalTvTunerV1_0TestConfigurations.h
@@ -214,8 +214,10 @@
return false;
}
- bool filterIsValid = filterMap.find(live.audioFilterId) != filterMap.end() &&
- filterMap.find(live.videoFilterId) != filterMap.end();
+ bool filterIsValid = (live.hasFrontendConnection)
+ ? filterMap.find(live.audioFilterId) != filterMap.end() &&
+ filterMap.find(live.videoFilterId) != filterMap.end()
+ : true;
filterIsValid &= playback.support
? (filterMap.find(playback.audioFilterId) != filterMap.end() &&
filterMap.find(playback.videoFilterId) != filterMap.end())
diff --git a/tv/tuner/config/TunerTestingConfigReader.h b/tv/tuner/config/TunerTestingConfigReader.h
index 7971fd2..e1f5695 100644
--- a/tv/tuner/config/TunerTestingConfigReader.h
+++ b/tv/tuner/config/TunerTestingConfigReader.h
@@ -50,6 +50,7 @@
using android::hardware::tv::tuner::V1_0::FrontendDvbtConstellation;
using android::hardware::tv::tuner::V1_0::FrontendDvbtGuardInterval;
using android::hardware::tv::tuner::V1_0::FrontendDvbtHierarchy;
+using android::hardware::tv::tuner::V1_0::FrontendDvbtPlpMode;
using android::hardware::tv::tuner::V1_0::FrontendDvbtSettings;
using android::hardware::tv::tuner::V1_0::FrontendDvbtStandard;
using android::hardware::tv::tuner::V1_0::FrontendDvbtTransmissionMode;
@@ -368,8 +369,6 @@
int size = privateData.size();
descramblerMap[id].hidlPvtData.resize(size);
memcpy(descramblerMap[id].hidlPvtData.data(), privateData.data(), size);
- } else {
- descramblerMap[id].hidlPvtData.resize(256);
}
}
}
@@ -468,7 +467,6 @@
return;
}
auto recordConfig = *dataFlow.getFirstDvrRecord();
- record.frontendId = recordConfig.getFrontendConnection();
record.recordFilterId = recordConfig.getRecordFilterConnection();
record.dvrRecordId = recordConfig.getDvrRecordConnection();
if (recordConfig.hasDvrSoftwareFeConnection()) {
@@ -477,6 +475,7 @@
if (recordConfig.getHasFrontendConnection()) {
record.hasFrontendConnection = true;
record.dvrSourceId = emptyHardwareId;
+ record.frontendId = recordConfig.getFrontendConnection();
} else {
record.hasFrontendConnection = false;
record.dvrSourceId = recordConfig.getDvrSourceConnection();
@@ -492,7 +491,6 @@
return;
}
auto descConfig = *dataFlow.getFirstDescrambling();
- descrambling.frontendId = descConfig.getFrontendConnection();
descrambling.descramblerId = descConfig.getDescramblerConnection();
descrambling.audioFilterId = descConfig.getAudioFilterConnection();
descrambling.videoFilterId = descConfig.getVideoFilterConnection();
@@ -502,6 +500,7 @@
if (descConfig.getHasFrontendConnection()) {
descrambling.hasFrontendConnection = true;
descrambling.dvrSourceId = emptyHardwareId;
+ descrambling.frontendId = descConfig.getFrontendConnection();
} else {
descrambling.hasFrontendConnection = false;
descrambling.dvrSourceId = descConfig.getDvrSourceConnection();
@@ -574,8 +573,26 @@
feConfig.getFirstDvbtFrontendSettings_optional()->getTransmissionMode());
dvbtSettings.bandwidth = static_cast<FrontendDvbtBandwidth>(
feConfig.getFirstDvbtFrontendSettings_optional()->getBandwidth());
+ dvbtSettings.constellation = static_cast<FrontendDvbtConstellation>(
+ feConfig.getFirstDvbtFrontendSettings_optional()->getConstellation());
+ dvbtSettings.hierarchy = static_cast<FrontendDvbtHierarchy>(
+ feConfig.getFirstDvbtFrontendSettings_optional()->getHierarchy());
+ dvbtSettings.hpCoderate = static_cast<FrontendDvbtCoderate>(
+ feConfig.getFirstDvbtFrontendSettings_optional()->getHpCoderate());
+ dvbtSettings.lpCoderate = static_cast<FrontendDvbtCoderate>(
+ feConfig.getFirstDvbtFrontendSettings_optional()->getLpCoderate());
+ dvbtSettings.guardInterval = static_cast<FrontendDvbtGuardInterval>(
+ feConfig.getFirstDvbtFrontendSettings_optional()->getGuardInterval());
dvbtSettings.isHighPriority =
feConfig.getFirstDvbtFrontendSettings_optional()->getIsHighPriority();
+ dvbtSettings.standard = static_cast<FrontendDvbtStandard>(
+ feConfig.getFirstDvbtFrontendSettings_optional()->getStandard());
+ dvbtSettings.isMiso = feConfig.getFirstDvbtFrontendSettings_optional()->getIsMiso();
+ dvbtSettings.plpMode = static_cast<FrontendDvbtPlpMode>(
+ feConfig.getFirstDvbtFrontendSettings_optional()->getPlpMode());
+ dvbtSettings.plpId = feConfig.getFirstDvbtFrontendSettings_optional()->getPlpId();
+ dvbtSettings.plpGroupId = feConfig.getFirstDvbtFrontendSettings_optional()->getPlpGroupId();
+
return dvbtSettings;
}
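
With the reader now copying every DVBT field from the XML, one readability note: each assignment re-invokes feConfig.getFirstDvbtFrontendSettings_optional(). Hoisting the result once would shorten the block considerably; a sketch, assuming the generated getter returns a stable pointer as xsdc getters typically do:

    // Same behavior as the block above, with the optional getter hoisted.
    const auto* dvbt = feConfig.getFirstDvbtFrontendSettings_optional();
    dvbtSettings.constellation =
            static_cast<FrontendDvbtConstellation>(dvbt->getConstellation());
    dvbtSettings.hierarchy = static_cast<FrontendDvbtHierarchy>(dvbt->getHierarchy());
    dvbtSettings.hpCoderate = static_cast<FrontendDvbtCoderate>(dvbt->getHpCoderate());
    dvbtSettings.lpCoderate = static_cast<FrontendDvbtCoderate>(dvbt->getLpCoderate());
    // ...and likewise for guardInterval, standard, isMiso, plpMode, plpId, plpGroupId.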
diff --git a/tv/tuner/config/api/current.txt b/tv/tuner/config/api/current.txt
index abd7155..3476414 100644
--- a/tv/tuner/config/api/current.txt
+++ b/tv/tuner/config/api/current.txt
@@ -160,10 +160,30 @@
public class DvbtFrontendSettings {
ctor public DvbtFrontendSettings();
method @Nullable public java.math.BigInteger getBandwidth();
+ method @Nullable public java.math.BigInteger getConstellation();
+ method @Nullable public java.math.BigInteger getGuardInterval();
+ method @Nullable public java.math.BigInteger getHierarchy();
+ method @Nullable public java.math.BigInteger getHpCoderate();
method @Nullable public java.math.BigInteger getIsHighPriority();
+ method @Nullable public java.math.BigInteger getIsMiso();
+ method @Nullable public java.math.BigInteger getLpCoderate();
+ method @Nullable public java.math.BigInteger getPlpGroupId();
+ method @Nullable public java.math.BigInteger getPlpId();
+ method @Nullable public java.math.BigInteger getPlpMode();
+ method @Nullable public java.math.BigInteger getStandard();
method @Nullable public java.math.BigInteger getTransmissionMode();
method public void setBandwidth(@Nullable java.math.BigInteger);
+ method public void setConstellation(@Nullable java.math.BigInteger);
+ method public void setGuardInterval(@Nullable java.math.BigInteger);
+ method public void setHierarchy(@Nullable java.math.BigInteger);
+ method public void setHpCoderate(@Nullable java.math.BigInteger);
method public void setIsHighPriority(@Nullable java.math.BigInteger);
+ method public void setIsMiso(@Nullable java.math.BigInteger);
+ method public void setLpCoderate(@Nullable java.math.BigInteger);
+ method public void setPlpGroupId(@Nullable java.math.BigInteger);
+ method public void setPlpId(@Nullable java.math.BigInteger);
+ method public void setPlpMode(@Nullable java.math.BigInteger);
+ method public void setStandard(@Nullable java.math.BigInteger);
method public void setTransmissionMode(@Nullable java.math.BigInteger);
}
diff --git a/tv/tuner/config/sample_tuner_vts_config.xml b/tv/tuner/config/sample_tuner_vts_config.xml
index 2624076..347e984 100644
--- a/tv/tuner/config/sample_tuner_vts_config.xml
+++ b/tv/tuner/config/sample_tuner_vts_config.xml
@@ -54,7 +54,10 @@
<frontends>
<frontend id="FE_DEFAULT" type="DVBT" isSoftwareFrontend="true"
connectToCicamId="0" frequency="578000" endFrequency="800000">
- <dvbtFrontendSettings bandwidth="8" transmissionMode="1" isHighPriority="1"/>
+ <dvbtFrontendSettings bandwidth="8" transmissionMode="1" isHighPriority="1"
+ constellation="1" hierarchy="1" hpCoderate="1" lpCoderate="1"
+ guardInterval="1" standard="1" isMiso="0" plpMode="1"
+ plpId="0" plpGroupId="0"/>
</frontend>
<frontend id="FE_DVBS_0" type="DVBS" isSoftwareFrontend="true"
connectToCicamId="0" frequency="578000" endFrequency="800000">
diff --git a/tv/tuner/config/tuner_testing_dynamic_configuration.xsd b/tv/tuner/config/tuner_testing_dynamic_configuration.xsd
index e0c3e33..7f31a11 100644
--- a/tv/tuner/config/tuner_testing_dynamic_configuration.xsd
+++ b/tv/tuner/config/tuner_testing_dynamic_configuration.xsd
@@ -52,8 +52,18 @@
<xs:complexType name="dvbtFrontendSettings">
<xs:attribute name="bandwidth" type="xs:nonNegativeInteger" use="required"/>
- <xs:attribute name="transmissionMode" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="constellation" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="guardInterval" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="hierarchy" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="hpCoderate" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="lpCoderate" type="xs:nonNegativeInteger" use="required"/>
<xs:attribute name="isHighPriority" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="isMiso" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="plpGroupId" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="plpId" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="plpMode" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="standard" type="xs:nonNegativeInteger" use="required"/>
+ <xs:attribute name="transmissionMode" type="xs:nonNegativeInteger" use="required"/>
</xs:complexType>
<xs:complexType name="dvbsFrontendSettings">
<xs:attribute name="inputStreamId" type="xs:nonNegativeInteger" use="required"/>