neuralnetworks: fix out ParcelFileDescriptor

"out @nullable ParcelFileDescriptor" works well with the NDK backend,
but not with the Java backend. To avoid future incompatibility, the
AIDL compiler will forbid "out ParcelFileDescriptor".

This change moves the out parameters of IPreparedModel::executeFenced
(the sync fence and the IFencedExecutionCallback) into a dedicated
parcelable type, FencedExecutionResult.
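
For reference, the new parcelable is shaped roughly as follows. This is
a sketch only: the .aidl file is not part of the diff below, and the
field names are inferred from the VTS callers that follow.

    // Sketch of FencedExecutionResult; field names taken from the VTS
    // callers below, the actual AIDL definition is not in this diff.
    parcelable FencedExecutionResult {
        IFencedExecutionCallback callback;
        @nullable ParcelFileDescriptor syncFence;
    }

On the NDK backend this surfaces as a single FencedExecutionResult* out
argument, which is what the VTS changes below switch to.
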
Bug: 181194872
Test: atest --test-mapping packages/modules/NeuralNetworks
Change-Id: I8e628a7b96d81338fbe59ce8c0b849f98c00536c
diff --git a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
index 4beb828..4eb704b 100644
--- a/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
+++ b/neuralnetworks/aidl/vts/functional/GeneratedTestHarness.cpp
@@ -571,33 +571,32 @@
case Executor::FENCED: {
SCOPED_TRACE("fenced");
ErrorStatus result = ErrorStatus::NONE;
- ndk::ScopedFileDescriptor syncFenceFd;
- std::shared_ptr<IFencedExecutionCallback> fencedCallback;
+ FencedExecutionResult executionResult;
auto ret = preparedModel->executeFenced(request, {}, testConfig.measureTiming,
kNoDeadline, loopTimeoutDuration, kNoDuration,
- &syncFenceFd, &fencedCallback);
+ &executionResult);
ASSERT_TRUE(ret.isOk() || ret.getExceptionCode() == EX_SERVICE_SPECIFIC)
<< ret.getDescription();
if (!ret.isOk()) {
result = static_cast<ErrorStatus>(ret.getServiceSpecificError());
executionStatus = result;
- } else if (syncFenceFd.get() != -1) {
+ } else if (executionResult.syncFence.get() != -1) {
std::vector<ndk::ScopedFileDescriptor> waitFor;
- auto dupFd = dup(syncFenceFd.get());
+ auto dupFd = dup(executionResult.syncFence.get());
ASSERT_NE(dupFd, -1);
waitFor.emplace_back(dupFd);
// If a sync fence is returned, try start another run waiting for the sync fence.
ret = preparedModel->executeFenced(request, waitFor, testConfig.measureTiming,
kNoDeadline, loopTimeoutDuration, kNoDuration,
- &syncFenceFd, &fencedCallback);
+ &executionResult);
ASSERT_TRUE(ret.isOk());
- waitForSyncFence(syncFenceFd.get());
+ waitForSyncFence(executionResult.syncFence.get());
}
if (result == ErrorStatus::NONE) {
- ASSERT_NE(fencedCallback, nullptr);
+ ASSERT_NE(executionResult.callback, nullptr);
Timing timingFenced;
- auto ret =
- fencedCallback->getExecutionInfo(&timing, &timingFenced, &executionStatus);
+ auto ret = executionResult.callback->getExecutionInfo(&timing, &timingFenced,
+ &executionStatus);
ASSERT_TRUE(ret.isOk());
}
break;
diff --git a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
index a37a0ca..1929750 100644
--- a/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
+++ b/neuralnetworks/aidl/vts/functional/MemoryDomainTests.cpp
@@ -198,8 +198,8 @@
static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
}
ndk::ScopedAStatus executeFenced(const Request&, const std::vector<ndk::ScopedFileDescriptor>&,
- bool, int64_t, int64_t, int64_t, ndk::ScopedFileDescriptor*,
- std::shared_ptr<IFencedExecutionCallback>*) override {
+ bool, int64_t, int64_t, int64_t,
+ FencedExecutionResult*) override {
return ndk::ScopedAStatus::fromServiceSpecificError(
static_cast<int32_t>(ErrorStatus::GENERAL_FAILURE));
}
@@ -893,25 +893,24 @@
ErrorStatus executeFenced(const std::shared_ptr<IPreparedModel>& preparedModel,
const Request& request) {
- ndk::ScopedFileDescriptor syncFence;
- std::shared_ptr<IFencedExecutionCallback> fencedCallback;
+ FencedExecutionResult executionResult;
const auto ret = preparedModel->executeFenced(request, {}, false, kNoDeadline,
kOmittedTimeoutDuration, kNoDuration,
- &syncFence, &fencedCallback);
+ &executionResult);
if (!ret.isOk()) {
EXPECT_EQ(ret.getExceptionCode(), EX_SERVICE_SPECIFIC);
return static_cast<ErrorStatus>(ret.getServiceSpecificError());
}
- if (syncFence.get() != -1) {
- waitForSyncFence(syncFence.get());
+ if (executionResult.syncFence.get() != -1) {
+ waitForSyncFence(executionResult.syncFence.get());
}
- EXPECT_NE(fencedCallback, nullptr);
+ EXPECT_NE(executionResult.callback, nullptr);
ErrorStatus executionStatus = ErrorStatus::GENERAL_FAILURE;
Timing time = kNoTiming;
Timing timeFenced = kNoTiming;
const auto retExecutionInfo =
- fencedCallback->getExecutionInfo(&time, &timeFenced, &executionStatus);
+ executionResult.callback->getExecutionInfo(&time, &timeFenced, &executionStatus);
EXPECT_TRUE(retExecutionInfo.isOk());
EXPECT_EQ(time, kNoTiming);
return executionStatus;
diff --git a/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp b/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
index db8f429..3be4c1b 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateRequest.cpp
@@ -68,11 +68,10 @@
// fenced
{
SCOPED_TRACE(message + " [executeFenced]");
- ndk::ScopedFileDescriptor syncFence;
- std::shared_ptr<IFencedExecutionCallback> callback;
+ FencedExecutionResult executionResult;
const auto executeStatus = preparedModel->executeFenced(request, {}, false, kNoDeadline,
kOmittedTimeoutDuration,
- kNoDuration, &syncFence, &callback);
+ kNoDuration, &executionResult);
ASSERT_FALSE(executeStatus.isOk());
ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),