Invalidate NN interface objects on cache mismatch

Currently, when a call on an IDevice object fails with DEAD_OBJECT, the
runtime attempts to re-retrieve a handle to the rebooted IDevice
service. If the driver was updated after the original IDevice was
created, the rebooted IDevice service may have different metadata and
behavior. This is problematic because the original metadata is cached
in the runtime, and an application may already have made decisions
based on that metadata and behavior. (Note that even a driver service
that is functionally the same but has a different underlying
implementation, such as more optimized code, will report a different
`getVersionString`.) Instead, this CL invalidates the IDevice object on
a cache mismatch so that it always returns an error when used.
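
For reference, a minimal self-contained sketch of the new recovery
behavior, using hypothetical simplified types in place of nn::IDevice
and InvalidDevice (the actual change is in ResilientDevice::recover
below):

    #include <iostream>
    #include <memory>
    #include <string>

    // Hypothetical stand-in for the cached IDevice metadata.
    struct Device {
        std::string name;
        std::string versionString;
    };

    // If the recovered service's metadata differs from the cached copy,
    // keep the cached metadata, mark the device invalid, and let every
    // subsequent operation fail (analogous to returning InvalidDevice).
    std::shared_ptr<const Device> recover(const Device& cached,
                                          std::shared_ptr<const Device> recovered,
                                          bool* isValid) {
        const bool mismatch = cached.name != recovered->name ||
                              cached.versionString != recovered->versionString;
        if (mismatch) {
            *isValid = false;
            return std::make_shared<const Device>(cached);
        }
        return recovered;
    }

    int main() {
        const Device cached{"example-driver", "1.0"};
        auto rebooted =
                std::make_shared<const Device>(Device{"example-driver", "1.1"});
        bool isValid = true;
        recover(cached, rebooted, &isValid);
        std::cout << "device valid after recovery: " << std::boolalpha
                  << isValid << "\n";
    }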

Bug: 173081926
Test: mma
Change-Id: I805987361c627c32d45e1b7c7aed230376fc66ad
Merged-In: I805987361c627c32d45e1b7c7aed230376fc66ad
(cherry picked from commit 5a74c0fb0f23474a89471c49111e5ab526735392)
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h
new file mode 100644
index 0000000..8c04b88
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidBuffer.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BUFFER_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BUFFER_H
+
+#include <nnapi/IBuffer.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class InvalidBuffer final : public nn::IBuffer {
+  public:
+    nn::Request::MemoryDomainToken getToken() const override;
+
+    nn::GeneralResult<void> copyTo(const nn::Memory& dst) const override;
+
+    nn::GeneralResult<void> copyFrom(const nn::Memory& src,
+                                     const nn::Dimensions& dimensions) const override;
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_BUFFER_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
new file mode 100644
index 0000000..5e62b9a
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidDevice.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_DEVICE_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_DEVICE_H
+
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class InvalidDevice final : public nn::IDevice {
+  public:
+    InvalidDevice(std::string name, std::string versionString, nn::Version featureLevel,
+                  nn::DeviceType type, std::vector<nn::Extension> extensions,
+                  nn::Capabilities capabilities,
+                  std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded);
+
+    const std::string& getName() const override;
+    const std::string& getVersionString() const override;
+    nn::Version getFeatureLevel() const override;
+    nn::DeviceType getType() const override;
+    const std::vector<nn::Extension>& getSupportedExtensions() const override;
+    const nn::Capabilities& getCapabilities() const override;
+    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override;
+
+    nn::GeneralResult<void> wait() const override;
+
+    nn::GeneralResult<std::vector<bool>> getSupportedOperations(
+            const nn::Model& model) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModel(
+            const nn::Model& model, nn::ExecutionPreference preference, nn::Priority priority,
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedPreparedModel> prepareModelFromCache(
+            nn::OptionalTimePoint deadline, const std::vector<nn::SharedHandle>& modelCache,
+            const std::vector<nn::SharedHandle>& dataCache,
+            const nn::CacheToken& token) const override;
+
+    nn::GeneralResult<nn::SharedBuffer> allocate(
+            const nn::BufferDesc& desc, const std::vector<nn::SharedPreparedModel>& preparedModels,
+            const std::vector<nn::BufferRole>& inputRoles,
+            const std::vector<nn::BufferRole>& outputRoles) const override;
+
+  private:
+    const std::string kName;
+    const std::string kVersionString;
+    const nn::Version kFeatureLevel;
+    const nn::DeviceType kType;
+    const std::vector<nn::Extension> kExtensions;
+    const nn::Capabilities kCapabilities;
+    const std::pair<uint32_t, uint32_t> kNumberOfCacheFilesNeeded;
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_DEVICE_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
new file mode 100644
index 0000000..4b32b4e
--- /dev/null
+++ b/neuralnetworks/utils/common/include/nnapi/hal/InvalidPreparedModel.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_PREPARED_MODEL_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_PREPARED_MODEL_H
+
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+class InvalidPreparedModel final : public nn::IPreparedModel {
+  public:
+    nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> execute(
+            const nn::Request& request, nn::MeasureTiming measure,
+            const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration) const override;
+
+    nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> executeFenced(
+            const nn::Request& request, const std::vector<nn::SyncFence>& waitFor,
+            nn::MeasureTiming measure, const nn::OptionalTimePoint& deadline,
+            const nn::OptionalTimeoutDuration& loopTimeoutDuration,
+            const nn::OptionalTimeoutDuration& timeoutDurationAfterFence) const override;
+
+    std::any getUnderlyingResource() const override;
+};
+
+}  // namespace android::hardware::neuralnetworks::utils
+
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_INVALID_PREPARED_MODEL_H
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
index 4a84e4d..4bfed6c 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/ResilientDevice.h
@@ -45,8 +45,9 @@
                              std::string versionString, std::vector<nn::Extension> extensions,
                              nn::Capabilities capabilities, nn::SharedDevice device);
 
-    nn::SharedDevice getDevice() const;
-    nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const;
+    nn::SharedDevice getDevice() const EXCLUDES(mMutex);
+    nn::SharedDevice recover(const nn::IDevice* failingDevice, bool blocking) const
+            EXCLUDES(mMutex);
 
     const std::string& getName() const override;
     const std::string& getVersionString() const override;
@@ -78,6 +79,7 @@
             const std::vector<nn::BufferRole>& outputRoles) const override;
 
   private:
+    bool isValidInternal() const EXCLUDES(mMutex);
     nn::GeneralResult<nn::SharedPreparedModel> prepareModelInternal(
             bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
             nn::Priority priority, nn::OptionalTimePoint deadline,
@@ -100,6 +102,7 @@
     const nn::Capabilities kCapabilities;
     mutable std::mutex mMutex;
     mutable nn::SharedDevice mDevice GUARDED_BY(mMutex);
+    mutable bool mIsValid GUARDED_BY(mMutex) = true;
 };
 
 }  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidBuffer.cpp b/neuralnetworks/utils/common/src/InvalidBuffer.cpp
new file mode 100644
index 0000000..c6f75d7
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidBuffer.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidBuffer.h"
+
+#include <nnapi/IBuffer.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::Request::MemoryDomainToken InvalidBuffer::getToken() const {
+    return nn::Request::MemoryDomainToken{};
+}
+
+nn::GeneralResult<void> InvalidBuffer::copyTo(const nn::Memory& /*dst*/) const {
+    return NN_ERROR() << "InvalidBuffer";
+}
+
+nn::GeneralResult<void> InvalidBuffer::copyFrom(const nn::Memory& /*src*/,
+                                                const nn::Dimensions& /*dimensions*/) const {
+    return NN_ERROR() << "InvalidBuffer";
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidDevice.cpp b/neuralnetworks/utils/common/src/InvalidDevice.cpp
new file mode 100644
index 0000000..535ccb4
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidDevice.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidDevice.h"
+
+#include "InvalidBuffer.h"
+#include "InvalidPreparedModel.h"
+
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+InvalidDevice::InvalidDevice(std::string name, std::string versionString, nn::Version featureLevel,
+                             nn::DeviceType type, std::vector<nn::Extension> extensions,
+                             nn::Capabilities capabilities,
+                             std::pair<uint32_t, uint32_t> numberOfCacheFilesNeeded)
+    : kName(std::move(name)),
+      kVersionString(std::move(versionString)),
+      kFeatureLevel(featureLevel),
+      kType(type),
+      kExtensions(std::move(extensions)),
+      kCapabilities(std::move(capabilities)),
+      kNumberOfCacheFilesNeeded(numberOfCacheFilesNeeded) {}
+
+const std::string& InvalidDevice::getName() const {
+    return kName;
+}
+
+const std::string& InvalidDevice::getVersionString() const {
+    return kVersionString;
+}
+
+nn::Version InvalidDevice::getFeatureLevel() const {
+    return kFeatureLevel;
+}
+
+nn::DeviceType InvalidDevice::getType() const {
+    return kType;
+}
+
+const std::vector<nn::Extension>& InvalidDevice::getSupportedExtensions() const {
+    return kExtensions;
+}
+
+const nn::Capabilities& InvalidDevice::getCapabilities() const {
+    return kCapabilities;
+}
+
+std::pair<uint32_t, uint32_t> InvalidDevice::getNumberOfCacheFilesNeeded() const {
+    return kNumberOfCacheFilesNeeded;
+}
+
+nn::GeneralResult<void> InvalidDevice::wait() const {
+    return NN_ERROR() << "InvalidDevice";
+}
+
+nn::GeneralResult<std::vector<bool>> InvalidDevice::getSupportedOperations(
+        const nn::Model& /*model*/) const {
+    return NN_ERROR() << "InvalidDevice";
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> InvalidDevice::prepareModel(
+        const nn::Model& /*model*/, nn::ExecutionPreference /*preference*/,
+        nn::Priority /*priority*/, nn::OptionalTimePoint /*deadline*/,
+        const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+    return NN_ERROR() << "InvalidDevice";
+}
+
+nn::GeneralResult<nn::SharedPreparedModel> InvalidDevice::prepareModelFromCache(
+        nn::OptionalTimePoint /*deadline*/, const std::vector<nn::SharedHandle>& /*modelCache*/,
+        const std::vector<nn::SharedHandle>& /*dataCache*/, const nn::CacheToken& /*token*/) const {
+    return NN_ERROR() << "InvalidDevice";
+}
+
+nn::GeneralResult<nn::SharedBuffer> InvalidDevice::allocate(
+        const nn::BufferDesc& /*desc*/,
+        const std::vector<nn::SharedPreparedModel>& /*preparedModels*/,
+        const std::vector<nn::BufferRole>& /*inputRoles*/,
+        const std::vector<nn::BufferRole>& /*outputRoles*/) const {
+    return NN_ERROR() << "InvalidDevice";
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
new file mode 100644
index 0000000..9ae7a63
--- /dev/null
+++ b/neuralnetworks/utils/common/src/InvalidPreparedModel.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InvalidPreparedModel.h"
+
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace android::hardware::neuralnetworks::utils {
+
+nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
+InvalidPreparedModel::execute(const nn::Request& /*request*/, nn::MeasureTiming /*measure*/,
+                              const nn::OptionalTimePoint& /*deadline*/,
+                              const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/) const {
+    return NN_ERROR() << "InvalidPreparedModel";
+}
+
+nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>>
+InvalidPreparedModel::executeFenced(
+        const nn::Request& /*request*/, const std::vector<nn::SyncFence>& /*waitFor*/,
+        nn::MeasureTiming /*measure*/, const nn::OptionalTimePoint& /*deadline*/,
+        const nn::OptionalTimeoutDuration& /*loopTimeoutDuration*/,
+        const nn::OptionalTimeoutDuration& /*timeoutDurationAfterFence*/) const {
+    return NN_ERROR() << "InvalidPreparedModel";
+}
+
+std::any InvalidPreparedModel::getUnderlyingResource() const {
+    return {};
+}
+
+}  // namespace android::hardware::neuralnetworks::utils
diff --git a/neuralnetworks/utils/common/src/ResilientDevice.cpp b/neuralnetworks/utils/common/src/ResilientDevice.cpp
index 26025a5..2f83c5c 100644
--- a/neuralnetworks/utils/common/src/ResilientDevice.cpp
+++ b/neuralnetworks/utils/common/src/ResilientDevice.cpp
@@ -16,6 +16,9 @@
 
 #include "ResilientDevice.h"
 
+#include "InvalidBuffer.h"
+#include "InvalidDevice.h"
+#include "InvalidPreparedModel.h"
 #include "ResilientBuffer.h"
 #include "ResilientPreparedModel.h"
 
@@ -107,12 +110,21 @@
     }
     auto device = std::move(maybeDevice).value();
 
-    // TODO(b/173081926): Instead of CHECKing to ensure the cache has not been changed, return an
-    // invalid/"null" IDevice object that always fails.
-    CHECK_EQ(kName, device->getName());
-    CHECK_EQ(kVersionString, device->getVersionString());
-    CHECK(kExtensions == device->getSupportedExtensions());
-    CHECK_EQ(kCapabilities, device->getCapabilities());
+    // If recovered device has different metadata than what is cached (i.e., because it was
+    // updated), mark the device as invalid and preserve the cached data.
+    auto compare = [this, &device](auto fn) REQUIRES(mMutex) {
+        return std::invoke(fn, mDevice) != std::invoke(fn, device);
+    };
+    if (compare(&IDevice::getName) || compare(&IDevice::getVersionString) ||
+        compare(&IDevice::getFeatureLevel) || compare(&IDevice::getType) ||
+        compare(&IDevice::getSupportedExtensions) || compare(&IDevice::getCapabilities)) {
+        LOG(ERROR) << "Recovered device has different metadata than what is cached. Marking "
+                      "IDevice object as invalid.";
+        device = std::make_shared<const InvalidDevice>(
+                kName, kVersionString, mDevice->getFeatureLevel(), mDevice->getType(), kExtensions,
+                kCapabilities, mDevice->getNumberOfCacheFilesNeeded());
+        mIsValid = false;
+    }
 
     mDevice = std::move(device);
     return mDevice;
@@ -199,11 +211,19 @@
     return ResilientBuffer::create(std::move(makeBuffer));
 }
 
+bool ResilientDevice::isValidInternal() const {
+    std::lock_guard hold(mMutex);
+    return mIsValid;
+}
+
 nn::GeneralResult<nn::SharedPreparedModel> ResilientDevice::prepareModelInternal(
         bool blocking, const nn::Model& model, nn::ExecutionPreference preference,
         nn::Priority priority, nn::OptionalTimePoint deadline,
         const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+    if (!isValidInternal()) {
+        return std::make_shared<const InvalidPreparedModel>();
+    }
     const auto fn = [&model, preference, priority, deadline, &modelCache, &dataCache,
                      token](const nn::IDevice& device) {
         return device.prepareModel(model, preference, priority, deadline, modelCache, dataCache,
@@ -216,6 +236,9 @@
         bool blocking, nn::OptionalTimePoint deadline,
         const std::vector<nn::SharedHandle>& modelCache,
         const std::vector<nn::SharedHandle>& dataCache, const nn::CacheToken& token) const {
+    if (!isValidInternal()) {
+        return std::make_shared<const InvalidPreparedModel>();
+    }
     const auto fn = [deadline, &modelCache, &dataCache, token](const nn::IDevice& device) {
         return device.prepareModelFromCache(deadline, modelCache, dataCache, token);
     };
@@ -227,6 +250,9 @@
         const std::vector<nn::SharedPreparedModel>& preparedModels,
         const std::vector<nn::BufferRole>& inputRoles,
         const std::vector<nn::BufferRole>& outputRoles) const {
+    if (!isValidInternal()) {
+        return std::make_shared<const InvalidBuffer>();
+    }
     const auto fn = [&desc, &preparedModels, &inputRoles, &outputRoles](const nn::IDevice& device) {
         return device.allocate(desc, preparedModels, inputRoles, outputRoles);
     };