Reconfigure the input surface when needed.

When the set of output streams changes so that a
different input configuration is picked, reconfigure the
client to use a new Surface with the preferred input configuration.
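
For illustration, a client could react to the reconfiguration callbacks
roughly as sketched below. This is a minimal sketch and not part of the
change: the callback stub and frame-producer helper names are assumed; only
the onStreamConfigured / onStreamClosed calls and their arguments come from
this CL.

  // Hypothetical client-side callback (assumed NDK AIDL stub and helper
  // names): switch frame production to the newly configured input Surface
  // and stop feeding the stream once it is closed.
  class ClientCallback : public BnVirtualCameraCallback {
    ndk::ScopedAStatus onStreamConfigured(
        int32_t streamId, const ::aidl::android::view::Surface& surface,
        int32_t width, int32_t height, Format pixelFormat) override {
      mInputStreamId = streamId;
      // startProducingFrames is a hypothetical helper owned by the client.
      startProducingFrames(surface, width, height, pixelFormat);
      return ndk::ScopedAStatus::ok();
    }

    ndk::ScopedAStatus onStreamClosed(int32_t streamId) override {
      if (streamId == mInputStreamId) {
        // stopProducingFrames is a hypothetical helper owned by the client.
        stopProducingFrames();
      }
      return ndk::ScopedAStatus::ok();
    }

   private:
    int32_t mInputStreamId = -1;
  };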

Bug: 343628528
Test: atest virtual_camera_tests
Test: atest CtsVirtualDevicesCameraTestCases
Test: atest CtsVirtualDevicesCameraCtsTestCases

Change-Id: I8ec56adf0a230981be779a9b51d8505f1c83647c
diff --git a/services/camera/virtualcamera/VirtualCameraDevice.cc b/services/camera/virtualcamera/VirtualCameraDevice.cc
index ba4ea6b..e455378 100644
--- a/services/camera/virtualcamera/VirtualCameraDevice.cc
+++ b/services/camera/virtualcamera/VirtualCameraDevice.cc
@@ -618,6 +618,10 @@
   return maxResolution.value();
 }
 
+int VirtualCameraDevice::allocateInputStreamId() {
+  return mNextInputStreamId++;
+}
+
 std::shared_ptr<VirtualCameraDevice> VirtualCameraDevice::sharedFromThis() {
   // SharedRefBase which BnCameraDevice inherits from breaks
   // std::enable_shared_from_this. This is recommended replacement for
diff --git a/services/camera/virtualcamera/VirtualCameraDevice.h b/services/camera/virtualcamera/VirtualCameraDevice.h
index 53dcc4d..296383f 100644
--- a/services/camera/virtualcamera/VirtualCameraDevice.h
+++ b/services/camera/virtualcamera/VirtualCameraDevice.h
@@ -104,6 +104,9 @@
   // Returns largest supported input resolution.
   Resolution getMaxInputResolution() const;
 
+  // Allocates and returns the next input stream (input surface) id.
+  int allocateInputStreamId();
+
   // Maximal number of RAW streams - virtual camera doesn't support RAW streams.
   static constexpr int32_t kMaxNumberOfRawStreams = 0;
 
@@ -148,6 +151,8 @@
   const std::vector<
       aidl::android::companion::virtualcamera::SupportedStreamConfiguration>
       mSupportedInputConfigurations;
+
+  std::atomic_int mNextInputStreamId{0};
 };
 
 }  // namespace virtualcamera
diff --git a/services/camera/virtualcamera/VirtualCameraSession.cc b/services/camera/virtualcamera/VirtualCameraSession.cc
index 7f0adc3..e1815c7 100644
--- a/services/camera/virtualcamera/VirtualCameraSession.cc
+++ b/services/camera/virtualcamera/VirtualCameraSession.cc
@@ -212,6 +212,27 @@
   return Resolution(inputConfig.width, inputConfig.height);
 }
 
+std::optional<Resolution> resolutionFromSurface(const sp<Surface>& surface) {
+  Resolution res{0, 0};
+  if (surface == nullptr) {
+    ALOGE("%s: Cannot get resolution from null surface", __func__);
+    return std::nullopt;
+  }
+
+  int status = surface->query(NATIVE_WINDOW_WIDTH, &res.width);
+  if (status != NO_ERROR) {
+    ALOGE("%s: Failed to get width from surface", __func__);
+    return std::nullopt;
+  }
+
+  status = surface->query(NATIVE_WINDOW_HEIGHT, &res.height);
+  if (status != NO_ERROR) {
+    ALOGE("%s: Failed to get height from surface", __func__);
+    return std::nullopt;
+  }
+  return res;
+}
+
 std::optional<SupportedStreamConfiguration> pickInputConfigurationForStreams(
     const std::vector<Stream>& requestedStreams,
     const std::vector<SupportedStreamConfiguration>& supportedInputConfigs) {
@@ -292,13 +313,13 @@
 
 ndk::ScopedAStatus VirtualCameraSession::close() {
   ALOGV("%s", __func__);
-
-  if (mVirtualCameraClientCallback != nullptr) {
-    mVirtualCameraClientCallback->onStreamClosed(/*streamId=*/0);
-  }
-
   {
     std::lock_guard<std::mutex> lock(mLock);
+
+    if (mVirtualCameraClientCallback != nullptr) {
+      mVirtualCameraClientCallback->onStreamClosed(mCurrentInputStreamId);
+    }
+
     if (mRenderThread != nullptr) {
       mRenderThread->stop();
       mRenderThread = nullptr;
@@ -339,6 +360,7 @@
   }
 
   sp<Surface> inputSurface = nullptr;
+  int inputStreamId = -1;
   std::optional<SupportedStreamConfiguration> inputConfig;
   {
     std::lock_guard<std::mutex> lock(mLock);
@@ -358,13 +380,49 @@
           __func__, in_requestedConfiguration.toString().c_str());
       return cameraStatus(Status::ILLEGAL_ARGUMENT);
     }
-    if (mRenderThread == nullptr) {
-      mRenderThread = std::make_unique<VirtualCameraRenderThread>(
-          mSessionContext, resolutionFromInputConfig(*inputConfig),
-          virtualCamera->getMaxInputResolution(), mCameraDeviceCallback);
-      mRenderThread->start();
-      inputSurface = mRenderThread->getInputSurface();
+
+    if (mRenderThread != nullptr) {
+      // If there's already a render thread, this is not the first
+      // configuration call. If the existing input surface already matches the
+      // resolution of the newly picked config, there's nothing to do: the
+      // current render thread can serve the new set of streams. If it
+      // differs, we need to discard the current surface and reinitialize the
+      // render thread.
+
+      std::optional<Resolution> currentInputResolution =
+          resolutionFromSurface(mRenderThread->getInputSurface());
+      if (currentInputResolution.has_value() &&
+          *currentInputResolution == resolutionFromInputConfig(*inputConfig)) {
+        ALOGI(
+            "%s: Newly configured set of streams matches existing client "
+            "surface (%dx%d)",
+            __func__, currentInputResolution->width,
+            currentInputResolution->height);
+        return ndk::ScopedAStatus::ok();
+      }
+
+      if (mVirtualCameraClientCallback != nullptr) {
+        mVirtualCameraClientCallback->onStreamClosed(mCurrentInputStreamId);
+      }
+
+      ALOGV(
+          "%s: Newly requested output streams are not suitable for "
+          "pre-existing surface (%dx%d), creating new surface (%dx%d)",
+          __func__, currentInputResolution.value_or(Resolution{0, 0}).width,
+          currentInputResolution.value_or(Resolution{0, 0}).height,
+          inputConfig->width, inputConfig->height);
+
+      mRenderThread->flush();
+      mRenderThread->stop();
     }
+
+    mRenderThread = std::make_unique<VirtualCameraRenderThread>(
+        mSessionContext, resolutionFromInputConfig(*inputConfig),
+        virtualCamera->getMaxInputResolution(), mCameraDeviceCallback);
+    mRenderThread->start();
+    inputSurface = mRenderThread->getInputSurface();
+    inputStreamId = mCurrentInputStreamId =
+        virtualCamera->allocateInputStreamId();
   }
 
   if (mVirtualCameraClientCallback != nullptr && inputSurface != nullptr) {
@@ -372,7 +430,7 @@
     // support for multiple input streams is implemented. For now we always
     // create single texture.
     mVirtualCameraClientCallback->onStreamConfigured(
-        /*streamId=*/0, aidl::android::view::Surface(inputSurface.get()),
+        inputStreamId, aidl::android::view::Surface(inputSurface.get()),
         inputConfig->width, inputConfig->height, inputConfig->pixelFormat);
   }
 
@@ -519,6 +577,7 @@
 
   std::shared_ptr<ICameraDeviceCallback> cameraCallback = nullptr;
   RequestSettings requestSettings;
+  int currentInputStreamId;
   {
     std::lock_guard<std::mutex> lock(mLock);
 
@@ -537,6 +596,7 @@
     requestSettings = createSettingsFromMetadata(mCurrentRequestMetadata);
 
     cameraCallback = mCameraDeviceCallback;
+    currentInputStreamId = mCurrentInputStreamId;
   }
 
   if (cameraCallback == nullptr) {
@@ -574,7 +634,7 @@
 
   if (mVirtualCameraClientCallback != nullptr) {
     auto status = mVirtualCameraClientCallback->onProcessCaptureRequest(
-        /*streamId=*/0, request.frameNumber);
+        currentInputStreamId, request.frameNumber);
     if (!status.isOk()) {
       ALOGE(
           "Failed to invoke onProcessCaptureRequest client callback for frame "
diff --git a/services/camera/virtualcamera/VirtualCameraSession.h b/services/camera/virtualcamera/VirtualCameraSession.h
index 556314f..c2044b9 100644
--- a/services/camera/virtualcamera/VirtualCameraSession.h
+++ b/services/camera/virtualcamera/VirtualCameraSession.h
@@ -143,6 +143,8 @@
       GUARDED_BY(mLock);
 
   std::unique_ptr<VirtualCameraRenderThread> mRenderThread GUARDED_BY(mLock);
+
+  int mCurrentInputStreamId GUARDED_BY(mLock) = 0;
 };
 
 }  // namespace virtualcamera
diff --git a/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc b/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
index 671e031..93f90b4 100644
--- a/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
+++ b/services/camera/virtualcamera/tests/VirtualCameraSessionTest.cc
@@ -379,6 +379,92 @@
           .isOk());
 }
 
+TEST_F(VirtualCameraSessionInputChoiceTest, reconfigureSwitchesInputStream) {
+  // Create a camera configured to support SVGA YUV input and QVGA RGBA input.
+  auto virtualCameraSession = createSession(
+      {SupportedStreamConfiguration{.width = kSvgaWidth,
+                                    .height = kSvgaHeight,
+                                    .pixelFormat = Format::YUV_420_888,
+                                    .maxFps = kMaxFps},
+       SupportedStreamConfiguration{.width = kQvgaWidth,
+                                    .height = kQvgaHeight,
+                                    .pixelFormat = Format::RGBA_8888,
+                                    .maxFps = kMaxFps}});
+
+  // First configure a QVGA stream.
+  StreamConfiguration streamConfiguration;
+  streamConfiguration.streams = {createStream(
+      kStreamId, kQvgaWidth, kQvgaHeight, PixelFormat::IMPLEMENTATION_DEFINED)};
+  std::vector<HalStream> halStreams;
+
+  // Expect the QVGA input configuration to be chosen.
+  EXPECT_CALL(*mMockVirtualCameraClientCallback,
+              onStreamConfigured(kStreamId, _, kQvgaWidth, kQvgaHeight,
+                                 Format::RGBA_8888));
+  EXPECT_TRUE(
+      virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+          .isOk());
+
+  // Reconfigure with additional VGA stream.
+  streamConfiguration.streams.push_back(
+      createStream(kStreamId + 1, kVgaWidth, kVgaHeight,
+                   PixelFormat::IMPLEMENTATION_DEFINED));
+
+  // Expect original surface to be discarded.
+  EXPECT_CALL(*mMockVirtualCameraClientCallback, onStreamClosed(kStreamId));
+
+  // Expect the SVGA input configuration to be chosen.
+  EXPECT_CALL(*mMockVirtualCameraClientCallback,
+              onStreamConfigured(kStreamId + 1, _, kSvgaWidth, kSvgaHeight,
+                                 Format::YUV_420_888));
+  EXPECT_TRUE(
+      virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+          .isOk());
+}
+
+TEST_F(VirtualCameraSessionInputChoiceTest,
+       reconfigureKeepsInputStreamIfUnchanged) {
+  // Create a camera configured to support SVGA YUV input and QVGA RGBA input.
+  auto virtualCameraSession = createSession(
+      {SupportedStreamConfiguration{.width = kSvgaWidth,
+                                    .height = kSvgaHeight,
+                                    .pixelFormat = Format::YUV_420_888,
+                                    .maxFps = kMaxFps},
+       SupportedStreamConfiguration{.width = kQvgaWidth,
+                                    .height = kQvgaHeight,
+                                    .pixelFormat = Format::RGBA_8888,
+                                    .maxFps = kMaxFps}});
+
+  // First configure an SVGA stream.
+  StreamConfiguration streamConfiguration;
+  streamConfiguration.streams = {createStream(
+      kStreamId, kSvgaWidth, kSvgaHeight, PixelFormat::IMPLEMENTATION_DEFINED)};
+  std::vector<HalStream> halStreams;
+
+  // Expect the SVGA input configuration to be chosen.
+  EXPECT_CALL(*mMockVirtualCameraClientCallback,
+              onStreamConfigured(kStreamId, _, kSvgaWidth, kSvgaHeight,
+                                 Format::YUV_420_888));
+  EXPECT_TRUE(
+      virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+          .isOk());
+
+  // Reconfigure with two VGA streams. Because we only allow downscaling,
+  // this will again be matched to the SVGA input resolution.
+  streamConfiguration.streams = {
+      createStream(kStreamId + 1, kVgaWidth, kVgaHeight,
+                   PixelFormat::IMPLEMENTATION_DEFINED),
+      createStream(kStreamId + 2, kVgaWidth, kVgaHeight,
+                   PixelFormat::IMPLEMENTATION_DEFINED)};
+
+  // Expect the onStreamConfigured callback not to be invoked, since the
+  // original Surface is still the best fit for the current output streams.
+  EXPECT_CALL(*mMockVirtualCameraClientCallback, onStreamConfigured).Times(0);
+  EXPECT_TRUE(
+      virtualCameraSession->configureStreams(streamConfiguration, &halStreams)
+          .isOk());
+}
+
 }  // namespace
 }  // namespace virtualcamera
 }  // namespace companion