Merge "Add otapreopt logic for CompactDex version bump" am: 1812ea7c47 am: c63997207e
am: 9c7b560015

Change-Id: Ia62371b43501d4e9c774baf1f563a587bff45c75
diff --git a/headers/media_plugin/media/openmax/OMX_IndexExt.h b/headers/media_plugin/media/openmax/OMX_IndexExt.h
index 5a029d0..c2bf97e 100644
--- a/headers/media_plugin/media/openmax/OMX_IndexExt.h
+++ b/headers/media_plugin/media/openmax/OMX_IndexExt.h
@@ -85,6 +85,7 @@
     OMX_IndexParamMaxFrameDurationForBitrateControl,/**< reference: OMX_PARAM_U32TYPE */
     OMX_IndexParamVideoVp9,                         /**< reference: OMX_VIDEO_PARAM_VP9TYPE */
     OMX_IndexParamVideoAndroidVp9Encoder,           /**< reference: OMX_VIDEO_PARAM_ANDROID_VP9ENCODERTYPE */
+    OMX_IndexParamVideoAndroidImageGrid,            /**< reference: OMX_VIDEO_PARAM_ANDROID_IMAGEGRIDTYPE */
     OMX_IndexExtVideoEndUnused,
 
     /* Image & Video common configurations */
diff --git a/headers/media_plugin/media/openmax/OMX_Video.h b/headers/media_plugin/media/openmax/OMX_Video.h
index dc5cdab..9fd2fd2 100644
--- a/headers/media_plugin/media/openmax/OMX_Video.h
+++ b/headers/media_plugin/media/openmax/OMX_Video.h
@@ -89,6 +89,7 @@
     OMX_VIDEO_CodingVP9,        /**< Google VP9 */
     OMX_VIDEO_CodingHEVC,       /**< ITU H.265/HEVC */
     OMX_VIDEO_CodingDolbyVision,/**< Dolby Vision */
+    OMX_VIDEO_CodingImageHEIC,  /**< HEIF image encoded with HEVC */
     OMX_VIDEO_CodingKhronosExtensions = 0x6F000000, /**< Reserved region for introducing Khronos Standard Extensions */
     OMX_VIDEO_CodingVendorStartUnused = 0x7F000000, /**< Reserved region for introducing Vendor Extensions */
     OMX_VIDEO_CodingMax = 0x7FFFFFFF
diff --git a/headers/media_plugin/media/openmax/OMX_VideoExt.h b/headers/media_plugin/media/openmax/OMX_VideoExt.h
index c102564..4b90765 100644
--- a/headers/media_plugin/media/openmax/OMX_VideoExt.h
+++ b/headers/media_plugin/media/openmax/OMX_VideoExt.h
@@ -213,6 +213,7 @@
     OMX_VIDEO_HEVCProfileUnknown      = 0x0,
     OMX_VIDEO_HEVCProfileMain         = 0x1,
     OMX_VIDEO_HEVCProfileMain10       = 0x2,
+    OMX_VIDEO_HEVCProfileMainStill    = 0x4,
     // Main10 profile with HDR SEI support.
     OMX_VIDEO_HEVCProfileMain10HDR10  = 0x1000,
     OMX_VIDEO_HEVCProfileMax          = 0x7FFFFFFF
@@ -421,6 +422,48 @@
     OMX_U32 nBitrateRatios[OMX_VIDEO_ANDROID_MAXTEMPORALLAYERS];
 } OMX_VIDEO_CONFIG_ANDROID_TEMPORALLAYERINGTYPE;
 
+/**
+ * Android specific param for specifying image grid layout information for image encoding
+ * use cases, corresponding to index OMX_IndexParamVideoAndroidImageGrid.
+ *
+ * OMX_VIDEO_CodingImageHEIC encoders must handle this param type. When this param is set
+ * on the component with bEnabled set to true, nGrid* indicates the grid config desired
+ * by the client. The component can use this as a heuristic but is free to choose any
+ * suitable grid config, and the client shall always query the actual grid config from the
+ * component after the param is set. The encoder will receive each input image in full and
+ * shall encode it into tiles in row-major, top-row-first, left-to-right order, sending
+ * each encoded tile in a separate output buffer. All output buffers for the same input
+ * buffer shall carry the same timestamp as the input buffer. If the input buffer is marked
+ * EOS, the EOS should only appear on the last output buffer for that input buffer.
+ *
+ * OMX_VIDEO_CodingHEVC encoders might also receive this param when they are used for image
+ * encoding, although in that case the param only serves as a hint. The encoder will
+ * receive the input image tiles in row-major, top-row-first, left-to-right order.
+ * The grid config can be used for quality control or for optimizations.
+ *
+ * If this param is not set, the component shall assume that the grid option is disabled.
+ *
+ *  nSize                      : Size of the structure in bytes
+ *  nVersion                   : OMX specification version information
+ *  nPortIndex                 : Port that this structure applies to (output port for encoders)
+ *  bEnabled                   : Whether grid is enabled. If true, nGrid* specifies the grid
+ *                               config; otherwise nGrid* shall be ignored.
+ *  nGridWidth                 : Width of each tile.
+ *  nGridHeight                : Height of each tile.
+ *  nGridRows                  : Number of rows in the grid.
+ *  nGridCols                  : Number of cols in the grid.
+ */
+typedef struct OMX_VIDEO_PARAM_ANDROID_IMAGEGRIDTYPE {
+    OMX_U32 nSize;
+    OMX_VERSIONTYPE nVersion;
+    OMX_U32 nPortIndex;
+    OMX_BOOL bEnabled;
+    OMX_U32 nGridWidth;
+    OMX_U32 nGridHeight;
+    OMX_U32 nGridRows;
+    OMX_U32 nGridCols;
+} OMX_VIDEO_PARAM_ANDROID_IMAGEGRIDTYPE;
+
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
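
For reference, a minimal sketch (not part of this patch) of how a client might configure the new image grid param on an encoder component. The component handle omx_handle, the port index kOutputPortIndex, the chosen grid values, and the include paths are illustrative assumptions, not taken from this change:

    #include <string.h>

    #include <OMX_Core.h>
    #include <OMX_IndexExt.h>
    #include <OMX_VideoExt.h>

    // Requests a 512x512, 8x8 tile grid and reads back whatever grid config the
    // component actually chose, since the component is free to adjust it.
    OMX_ERRORTYPE ConfigureImageGrid(OMX_HANDLETYPE omx_handle, OMX_U32 kOutputPortIndex) {
        OMX_VIDEO_PARAM_ANDROID_IMAGEGRIDTYPE grid;
        memset(&grid, 0, sizeof(grid));
        grid.nSize = sizeof(grid);
        grid.nVersion.s.nVersionMajor = 1;   // OMX IL version used by the client
        grid.nPortIndex = kOutputPortIndex;  // output port of the encoder
        grid.bEnabled = OMX_TRUE;
        grid.nGridWidth = 512;               // desired tile width
        grid.nGridHeight = 512;              // desired tile height
        grid.nGridRows = 8;
        grid.nGridCols = 8;

        OMX_ERRORTYPE err = OMX_SetParameter(
                omx_handle, (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidImageGrid, &grid);
        if (err != OMX_ErrorNone) {
            return err;
        }
        // The actual grid config is whatever the component reports back here.
        return OMX_GetParameter(
                omx_handle, (OMX_INDEXTYPE)OMX_IndexParamVideoAndroidImageGrid, &grid);
    }
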
diff --git a/libs/vr/libdvr/tests/dvr_api_test.h b/libs/vr/libdvr/tests/dvr_api_test.h
index 648af75..d8359e7 100644
--- a/libs/vr/libdvr/tests/dvr_api_test.h
+++ b/libs/vr/libdvr/tests/dvr_api_test.h
@@ -3,8 +3,6 @@
 
 #include <gtest/gtest.h>
 
-#define ASSERT_NOT_NULL(x) ASSERT_TRUE((x) != nullptr)
-
 /** DvrTestBase loads the libdvr.so at runtime and get the Dvr API version 1. */
 class DvrApiTest : public ::testing::Test {
  protected:
@@ -17,11 +15,11 @@
     // https://github.com/android-ndk/ndk/issues/360
     flags |= RTLD_NODELETE;
     platform_handle_ = dlopen("libdvr.so", flags);
-    ASSERT_NOT_NULL(platform_handle_) << "Dvr shared library missing.";
+    ASSERT_NE(nullptr, platform_handle_) << "Dvr shared library missing.";
 
     auto dvr_get_api = reinterpret_cast<decltype(&dvrGetApi)>(
         dlsym(platform_handle_, "dvrGetApi"));
-    ASSERT_NOT_NULL(dvr_get_api) << "Platform library missing dvrGetApi.";
+    ASSERT_NE(nullptr, dvr_get_api) << "Platform library missing dvrGetApi.";
 
     ASSERT_EQ(dvr_get_api(&api_, sizeof(api_), /*version=*/1), 0)
         << "Unable to find compatible Dvr API.";
diff --git a/libs/vr/libdvr/tests/dvr_display-test.cpp b/libs/vr/libdvr/tests/dvr_display-test.cpp
index 1165573..c72f940 100644
--- a/libs/vr/libdvr/tests/dvr_display-test.cpp
+++ b/libs/vr/libdvr/tests/dvr_display-test.cpp
@@ -16,20 +16,58 @@
 
 class DvrDisplayTest : public DvrApiTest {
  protected:
+  void SetUp() override {
+    DvrApiTest::SetUp();
+    int ret = api_.GetNativeDisplayMetrics(sizeof(display_metrics_),
+                                           &display_metrics_);
+    ASSERT_EQ(ret, 0) << "Failed to get display metrics.";
+    ALOGD(
+        "display_width: %d, display_height: %d, display_x_dpi: %d, "
+        "display_y_dpi: %d, vsync_period_ns: %d.",
+        display_metrics_.display_width, display_metrics_.display_height,
+        display_metrics_.display_x_dpi, display_metrics_.display_y_dpi,
+        display_metrics_.vsync_period_ns);
+  }
+
   void TearDown() override {
     if (write_queue_ != nullptr) {
       api_.WriteBufferQueueDestroy(write_queue_);
       write_queue_ = nullptr;
     }
+    if (direct_surface_ != nullptr) {
+      api_.SurfaceDestroy(direct_surface_);
+      direct_surface_ = nullptr;
+    }
     DvrApiTest::TearDown();
   }
 
+  /* Converts a write buffer to an android hardware buffer and fills the
+   * buffer evenly with the given color_textures, with the last color also
+   * covering any remainder pixels.
+   * Asserts that the width of the buffer equals the input width and that
+   * the height of the buffer equals the input height.
+   */
+  void FillWriteBuffer(DvrWriteBuffer* write_buffer,
+                       const std::vector<uint32_t>& color_textures,
+                       uint32_t width, uint32_t height);
+
+  // Write buffer queue properties.
+  static constexpr uint64_t kUsage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
+                                     AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT |
+                                     AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+  uint32_t kFormat = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
+  static constexpr size_t kMetadataSize = 0;
+  static constexpr int kTimeoutMs = 1000;  // Time for getting buffer.
+  uint32_t kLayerCount = 1;
   DvrWriteBufferQueue* write_queue_ = nullptr;
+  DvrSurface* direct_surface_ = nullptr;
+
+  // Device display properties.
+  DvrNativeDisplayMetrics display_metrics_;
 };
 
-TEST_F(DvrDisplayTest, DisplaySingleColor) {
-  // Create direct surface.
-  DvrSurface* direct_surface = nullptr;
+TEST_F(DvrDisplayTest, DisplayWithOneBuffer) {
+  // Create a direct surface.
   std::vector<DvrSurfaceAttribute> direct_surface_attributes = {
       {.key = DVR_SURFACE_ATTRIBUTE_DIRECT,
        .value.type = DVR_SURFACE_ATTRIBUTE_TYPE_BOOL,
@@ -43,73 +81,32 @@
   };
   int ret =
       api_.SurfaceCreate(direct_surface_attributes.data(),
-                         direct_surface_attributes.size(), &direct_surface);
+                         direct_surface_attributes.size(), &direct_surface_);
   ASSERT_EQ(ret, 0) << "Failed to create direct surface.";
 
-  // Get screen dimension.
-  DvrNativeDisplayMetrics display_metrics;
-  ret = api_.GetNativeDisplayMetrics(sizeof(display_metrics), &display_metrics);
-  ASSERT_EQ(ret, 0) << "Failed to get display metrics.";
-  ALOGD(
-      "display_width: %d, display_height: %d, display_x_dpi: %d, "
-      "display_y_dpi: %d, vsync_period_ns: %d.",
-      display_metrics.display_width, display_metrics.display_height,
-      display_metrics.display_x_dpi, display_metrics.display_y_dpi,
-      display_metrics.vsync_period_ns);
-
   // Create a buffer queue with the direct surface.
-  constexpr uint32_t kLayerCount = 1;
-  constexpr uint64_t kUsage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
-                              AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT |
-                              AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
-  constexpr uint32_t kFormat = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
   constexpr size_t kCapacity = 1;
-  constexpr size_t kMetadataSize = 0;
-  uint32_t width = display_metrics.display_width;
-  uint32_t height = display_metrics.display_height;
+  uint32_t width = display_metrics_.display_width;
+  uint32_t height = display_metrics_.display_height;
   ret = api_.SurfaceCreateWriteBufferQueue(
-      direct_surface, width, height, kFormat, kLayerCount, kUsage, kCapacity,
+      direct_surface_, width, height, kFormat, kLayerCount, kUsage, kCapacity,
       kMetadataSize, &write_queue_);
   EXPECT_EQ(0, ret) << "Failed to create buffer queue.";
-  ASSERT_NOT_NULL(write_queue_) << "Write buffer queue should not be null.";
+  ASSERT_NE(nullptr, write_queue_) << "Write buffer queue should not be null.";
 
   // Get buffer from WriteBufferQueue.
   DvrWriteBuffer* write_buffer = nullptr;
-  constexpr int kTimeoutMs = 1000;
   DvrNativeBufferMetadata out_meta;
   int out_fence_fd = -1;
   ret = api_.WriteBufferQueueGainBuffer(write_queue_, kTimeoutMs, &write_buffer,
                                         &out_meta, &out_fence_fd);
   EXPECT_EQ(0, ret) << "Failed to get the buffer.";
-  ASSERT_NOT_NULL(write_buffer) << "Gained buffer should not be null.";
+  ASSERT_NE(nullptr, write_buffer) << "Gained buffer should not be null.";
 
-  // Convert to an android hardware buffer.
-  AHardwareBuffer* ah_buffer{nullptr};
-  ret = api_.WriteBufferGetAHardwareBuffer(write_buffer, &ah_buffer);
-  EXPECT_EQ(0, ret) << "Failed to get a hardware buffer from the write buffer.";
-  ASSERT_NOT_NULL(ah_buffer) << "AHardware buffer should not be null.";
-
-  // Change the content of the android hardware buffer.
-  void* buffer_data{nullptr};
-  int32_t fence = -1;
-  ret = AHardwareBuffer_lock(ah_buffer, AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
-                             fence, nullptr, &buffer_data);
-  EXPECT_EQ(0, ret) << "Failed to lock the hardware buffer.";
-  ASSERT_NOT_NULL(buffer_data) << "Buffer data should not be null.";
-
-  uint32_t color_texture = 0xff0000ff;  // Red color in RGBA.
-  for (uint32_t i = 0; i < width * height; ++i) {
-    memcpy(reinterpret_cast<void*>(reinterpret_cast<int64_t>(buffer_data) +
-                                   i * sizeof(color_texture)),
-           &color_texture, sizeof(color_texture));
-  }
-
-  fence = -1;
-  ret = AHardwareBuffer_unlock(ah_buffer, &fence);
-  EXPECT_EQ(0, ret) << "Failed to unlock the hardware buffer.";
-
-  // Release the android hardware buffer.
-  AHardwareBuffer_release(ah_buffer);
+  // Color the write buffer.
+  FillWriteBuffer(write_buffer,
+                  {0xff000000, 0x00ff0000, 0x0000ff00, 0x000000ff, 0x00000000},
+                  width, height);
 
   // Post buffer.
   int ready_fence_fd = -1;
@@ -118,4 +115,237 @@
   EXPECT_EQ(0, ret) << "Failed to post the buffer.";
 
   sleep(5);  // For visual check on the device under test.
+  // Should observe three primary colors at the center of the screen.
+}
+
+TEST_F(DvrDisplayTest, DisplayWithDoubleBuffering) {
+  // Create a direct surface.
+  std::vector<DvrSurfaceAttribute> direct_surface_attributes = {
+      {.key = DVR_SURFACE_ATTRIBUTE_DIRECT,
+       .value.type = DVR_SURFACE_ATTRIBUTE_TYPE_BOOL,
+       .value.bool_value = true},
+      {.key = DVR_SURFACE_ATTRIBUTE_Z_ORDER,
+       .value.type = DVR_SURFACE_ATTRIBUTE_TYPE_INT32,
+       .value.int32_value = 10},
+      {.key = DVR_SURFACE_ATTRIBUTE_VISIBLE,
+       .value.type = DVR_SURFACE_ATTRIBUTE_TYPE_BOOL,
+       .value.bool_value = true},
+  };
+  int ret =
+      api_.SurfaceCreate(direct_surface_attributes.data(),
+                         direct_surface_attributes.size(), &direct_surface_);
+  ASSERT_EQ(ret, 0) << "Failed to create direct surface.";
+
+  // Create a buffer queue with the direct surface.
+  constexpr size_t kCapacity = 2;
+  uint32_t width = display_metrics_.display_width;
+  uint32_t height = display_metrics_.display_height;
+  ret = api_.SurfaceCreateWriteBufferQueue(
+      direct_surface_, width, height, kFormat, kLayerCount, kUsage, kCapacity,
+      kMetadataSize, &write_queue_);
+  EXPECT_EQ(0, ret) << "Failed to create buffer queue.";
+  ASSERT_NE(nullptr, write_queue_) << "Write buffer queue should not be null.";
+
+  int num_display_cycles_in_5s = 5 / (display_metrics_.vsync_period_ns / 1e9);
+  ALOGD("The number of display cycles: %d", num_display_cycles_in_5s);
+  int bufferhub_id_prev_write_buffer = -1;
+  for (int i = 0; i < num_display_cycles_in_5s; ++i) {
+    // Get a buffer from the WriteBufferQueue.
+    DvrWriteBuffer* write_buffer = nullptr;
+    DvrNativeBufferMetadata out_meta;
+    int out_fence_fd = -1;
+    ret = api_.WriteBufferQueueGainBuffer(
+        write_queue_, kTimeoutMs, &write_buffer, &out_meta, &out_fence_fd);
+    EXPECT_EQ(0, ret) << "Failed to get the a write buffer.";
+    ASSERT_NE(nullptr, write_buffer) << "The gained buffer should not be null.";
+
+    int bufferhub_id = api_.WriteBufferGetId(write_buffer);
+    ALOGD("Display cycle: %d, bufferhub id of the write buffer: %d", i,
+          bufferhub_id);
+    EXPECT_NE(bufferhub_id_prev_write_buffer, bufferhub_id)
+        << "Double buffering should be using the two buffers in turns, not "
+           "reusing the same write buffer.";
+    bufferhub_id_prev_write_buffer = bufferhub_id;
+
+    // Color the write buffer.
+    if (i % 2) {
+      FillWriteBuffer(write_buffer, {0xffff0000, 0xff00ff00, 0xff0000ff}, width,
+                      height);
+    } else {
+      FillWriteBuffer(write_buffer, {0xff00ff00, 0xff0000ff, 0xffff0000}, width,
+                      height);
+    }
+
+    // Post the write buffer.
+    int ready_fence_fd = -1;
+    ret = api_.WriteBufferQueuePostBuffer(write_queue_, write_buffer, &out_meta,
+                                          ready_fence_fd);
+    EXPECT_EQ(0, ret) << "Failed to post the buffer.";
+  }
+  // Should observe a blinking screen in secondary colors,
+  // although it is actually displaying primary colors.
+}
+
+TEST_F(DvrDisplayTest, DisplayWithTwoHardwareLayers) {
+  // Create direct_surface_0 with z order 10 and direct_surface_1 with
+  // z order 11.
+  DvrSurface* direct_surface_0 = nullptr;
+  std::vector<DvrSurfaceAttribute> direct_surface_0_attributes = {
+      {.key = DVR_SURFACE_ATTRIBUTE_DIRECT,
+       .value.type = DVR_SURFACE_ATTRIBUTE_TYPE_BOOL,
+       .value.bool_value = true},
+      {.key = DVR_SURFACE_ATTRIBUTE_Z_ORDER,
+       .value.type = DVR_SURFACE_ATTRIBUTE_TYPE_INT32,
+       .value.int32_value = 10},
+      {.key = DVR_SURFACE_ATTRIBUTE_VISIBLE,
+       .value.type = DVR_SURFACE_ATTRIBUTE_TYPE_BOOL,
+       .value.bool_value = true},
+  };
+  int ret =
+      api_.SurfaceCreate(direct_surface_0_attributes.data(),
+                         direct_surface_0_attributes.size(), &direct_surface_0);
+  EXPECT_EQ(ret, 0) << "Failed to create direct surface.";
+
+  DvrSurface* direct_surface_1 = nullptr;
+  std::vector<DvrSurfaceAttribute> direct_surface_1_attributes = {
+      {.key = DVR_SURFACE_ATTRIBUTE_DIRECT,
+       .value.type = DVR_SURFACE_ATTRIBUTE_TYPE_BOOL,
+       .value.bool_value = true},
+      {.key = DVR_SURFACE_ATTRIBUTE_Z_ORDER,
+       .value.type = DVR_SURFACE_ATTRIBUTE_TYPE_INT32,
+       .value.int32_value = 11},
+      {.key = DVR_SURFACE_ATTRIBUTE_VISIBLE,
+       .value.type = DVR_SURFACE_ATTRIBUTE_TYPE_BOOL,
+       .value.bool_value = true},
+  };
+  ret =
+      api_.SurfaceCreate(direct_surface_1_attributes.data(),
+                         direct_surface_1_attributes.size(), &direct_surface_1);
+  EXPECT_EQ(ret, 0) << "Failed to create direct surface.";
+
+  // Create a buffer queue for each of the direct surfaces.
+  constexpr size_t kCapacity = 1;
+  uint32_t width = display_metrics_.display_width;
+  uint32_t height = display_metrics_.display_height;
+
+  DvrWriteBufferQueue* write_queue_0 = nullptr;
+  ret = api_.SurfaceCreateWriteBufferQueue(
+      direct_surface_0, width, height, kFormat, kLayerCount, kUsage, kCapacity,
+      kMetadataSize, &write_queue_0);
+  EXPECT_EQ(0, ret) << "Failed to create buffer queue.";
+  EXPECT_NE(nullptr, write_queue_0) << "Write buffer queue should not be null.";
+
+  DvrWriteBufferQueue* write_queue_1 = nullptr;
+  ret = api_.SurfaceCreateWriteBufferQueue(
+      direct_surface_1, width, height, kFormat, kLayerCount, kUsage, kCapacity,
+      kMetadataSize, &write_queue_1);
+  EXPECT_EQ(0, ret) << "Failed to create buffer queue.";
+  EXPECT_NE(nullptr, write_queue_1) << "Write buffer queue should not be null.";
+
+  // Get a buffer from each of the write buffer queues.
+  DvrWriteBuffer* write_buffer_0 = nullptr;
+  DvrNativeBufferMetadata out_meta_0;
+  int out_fence_fd = -1;
+  ret = api_.WriteBufferQueueGainBuffer(
+      write_queue_0, kTimeoutMs, &write_buffer_0, &out_meta_0, &out_fence_fd);
+  EXPECT_EQ(0, ret) << "Failed to get the buffer.";
+  EXPECT_NE(nullptr, write_buffer_0) << "Gained buffer should not be null.";
+
+  DvrWriteBuffer* write_buffer_1 = nullptr;
+  DvrNativeBufferMetadata out_meta_1;
+  out_fence_fd = -1;
+  ret = api_.WriteBufferQueueGainBuffer(
+      write_queue_1, kTimeoutMs, &write_buffer_1, &out_meta_1, &out_fence_fd);
+  EXPECT_EQ(0, ret) << "Failed to get the buffer.";
+  EXPECT_NE(nullptr, write_buffer_1) << "Gained buffer should not be null.";
+
+  // Color the write buffers.
+  FillWriteBuffer(write_buffer_0, {0xffff0000, 0xff00ff00, 0xff0000ff}, width,
+                  height);
+  FillWriteBuffer(write_buffer_1, {0x7f00ff00, 0x7f0000ff, 0x7fff0000}, width,
+                  height);
+
+  // Post buffers.
+  int ready_fence_fd = -1;
+  ret = api_.WriteBufferQueuePostBuffer(write_queue_0, write_buffer_0,
+                                        &out_meta_0, ready_fence_fd);
+  EXPECT_EQ(0, ret) << "Failed to post the buffer.";
+
+  ready_fence_fd = -1;
+  ret = api_.WriteBufferQueuePostBuffer(write_queue_1, write_buffer_1,
+                                        &out_meta_1, ready_fence_fd);
+  EXPECT_EQ(0, ret) << "Failed to post the buffer.";
+
+  sleep(5);  // For visual check on the device under test.
+  // Should observe three secondary colors.
+
+  // Test finished. Clean up buffers and surfaces.
+  if (write_queue_0 != nullptr) {
+    api_.WriteBufferQueueDestroy(write_queue_0);
+    write_queue_0 = nullptr;
+  }
+  if (write_queue_1 != nullptr) {
+    api_.WriteBufferQueueDestroy(write_queue_1);
+    write_queue_1 = nullptr;
+  }
+  if (direct_surface_0 != nullptr) {
+    api_.SurfaceDestroy(direct_surface_0);
+  }
+  if (direct_surface_1 != nullptr) {
+    api_.SurfaceDestroy(direct_surface_1);
+  }
+}
+
+void DvrDisplayTest::FillWriteBuffer(
+    DvrWriteBuffer* write_buffer, const std::vector<uint32_t>& color_textures,
+    uint32_t width, uint32_t height) {
+  uint32_t num_colors = color_textures.size();
+  // Convert the write buffer to an android hardware buffer.
+  AHardwareBuffer* ah_buffer = nullptr;
+  int ret = api_.WriteBufferGetAHardwareBuffer(write_buffer, &ah_buffer);
+  ASSERT_EQ(0, ret) << "Failed to get a hardware buffer from the write buffer.";
+  ASSERT_NE(nullptr, ah_buffer) << "AHardware buffer should not be null.";
+  AHardwareBuffer_Desc ah_buffer_describe;
+  AHardwareBuffer_describe(ah_buffer, &ah_buffer_describe);
+  ASSERT_EQ(ah_buffer_describe.format, kFormat)
+      << "The format of the android hardware buffer is wrong.";
+  ASSERT_EQ(ah_buffer_describe.layers, kLayerCount)
+      << "The obtained android hardware buffer should have 2 layers.";
+  ASSERT_EQ(ah_buffer_describe.width, width)
+      << "The obtained android hardware buffer width is wrong.";
+  ASSERT_EQ(ah_buffer_describe.height, height)
+      << "The obtained android hardware buffer height is wrong.";
+  // Change the content of the android hardware buffer.
+  void* buffer_data = nullptr;
+  int32_t fence = -1;
+  ret = AHardwareBuffer_lock(ah_buffer, AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
+                             fence, nullptr, &buffer_data);
+  ASSERT_EQ(0, ret) << "Failed to lock the hardware buffer.";
+  ASSERT_NE(nullptr, buffer_data) << "Buffer data should not be null.";
+
+  uint32_t num_pixels = width * height / num_colors;
+  for (uint32_t color_index = 0; color_index < num_colors - 1; ++color_index) {
+    uint32_t color_texture = color_textures[color_index];
+    for (uint32_t i = 0; i < num_pixels; ++i) {
+      memcpy(reinterpret_cast<void*>(reinterpret_cast<int64_t>(buffer_data) +
+                                     (i + num_pixels * color_index) *
+                                         sizeof(color_texture)),
+             &color_texture, sizeof(color_texture));
+    }
+  }
+  uint32_t color_texture = color_textures[num_colors - 1];
+  uint32_t num_colored_pixels = num_pixels * (num_colors - 1);
+  num_pixels = width * height - num_colored_pixels;
+  for (uint32_t i = 0; i < num_pixels; ++i) {
+    memcpy(reinterpret_cast<void*>(reinterpret_cast<int64_t>(buffer_data) +
+                                   (i + num_colored_pixels) *
+                                       sizeof(color_texture)),
+           &color_texture, sizeof(color_texture));
+  }
+  fence = -1;
+  ret = AHardwareBuffer_unlock(ah_buffer, &fence);
+  EXPECT_EQ(0, ret) << "Failed to unlock the hardware buffer.";
+
+  // Release the android hardware buffer.
+  AHardwareBuffer_release(ah_buffer);
 }
diff --git a/services/surfaceflinger/BufferLayer.cpp b/services/surfaceflinger/BufferLayer.cpp
index 3dbc136..d07a52b 100644
--- a/services/surfaceflinger/BufferLayer.cpp
+++ b/services/surfaceflinger/BufferLayer.cpp
@@ -98,15 +98,13 @@
 }
 
 bool BufferLayer::isProtected() const {
-    const sp<GraphicBuffer>& buffer(getBE().compositionInfo.mBuffer);
-    return (buffer != 0) &&
-            (buffer->getUsage() & GRALLOC_USAGE_PROTECTED);
+    const sp<GraphicBuffer>& buffer(mActiveBuffer);
+    return (buffer != 0) && (buffer->getUsage() & GRALLOC_USAGE_PROTECTED);
 }
 
 bool BufferLayer::isVisible() const {
     return !(isHiddenByPolicy()) && getAlpha() > 0.0f &&
-            (getBE().compositionInfo.mBuffer != nullptr ||
-             getBE().compositionInfo.hwc.sidebandStream != nullptr);
+            (mActiveBuffer != nullptr || getBE().compositionInfo.hwc.sidebandStream != nullptr);
 }
 
 bool BufferLayer::isFixedSize() const {
@@ -162,7 +160,7 @@
                          bool useIdentityTransform) const {
     ATRACE_CALL();
 
-    if (CC_UNLIKELY(getBE().compositionInfo.mBuffer == 0)) {
+    if (CC_UNLIKELY(mActiveBuffer == 0)) {
         // the texture has not been created yet, this Layer has
         // in fact never been drawn into. This happens frequently with
         // SurfaceView because the WindowManager can't know when the client
@@ -240,8 +238,7 @@
         }
 
         // Set things up for texturing.
-        mTexture.setDimensions(getBE().compositionInfo.mBuffer->getWidth(),
-                               getBE().compositionInfo.mBuffer->getHeight());
+        mTexture.setDimensions(mActiveBuffer->getWidth(), mActiveBuffer->getHeight());
         mTexture.setFiltering(useFiltering);
         mTexture.setMatrix(textureMatrix);
 
@@ -291,12 +288,10 @@
 bool BufferLayer::onPreComposition(nsecs_t refreshStartTime) {
     if (mBufferLatched) {
         Mutex::Autolock lock(mFrameEventHistoryMutex);
-        mFrameEventHistory.addPreComposition(mCurrentFrameNumber,
-                                             refreshStartTime);
+        mFrameEventHistory.addPreComposition(mCurrentFrameNumber, refreshStartTime);
     }
     mRefreshPending = false;
-    return mQueuedFrames > 0 || mSidebandStreamChanged ||
-            mAutoRefresh;
+    return mQueuedFrames > 0 || mSidebandStreamChanged || mAutoRefresh;
 }
 bool BufferLayer::onPostComposition(const std::shared_ptr<FenceTime>& glDoneFence,
                                     const std::shared_ptr<FenceTime>& presentFence,
@@ -308,8 +303,8 @@
     // Update mFrameEventHistory.
     {
         Mutex::Autolock lock(mFrameEventHistoryMutex);
-        mFrameEventHistory.addPostComposition(mCurrentFrameNumber, glDoneFence,
-                                              presentFence, compositorTiming);
+        mFrameEventHistory.addPostComposition(mCurrentFrameNumber, glDoneFence, presentFence,
+                                              compositorTiming);
     }
 
     // Update mFrameTracker.
@@ -358,8 +353,7 @@
         return;
     }
 
-    auto releaseFenceTime =
-            std::make_shared<FenceTime>(mConsumer->getPrevFinalReleaseFence());
+    auto releaseFenceTime = std::make_shared<FenceTime>(mConsumer->getPrevFinalReleaseFence());
     mReleaseTimeline.updateSignalTimes();
     mReleaseTimeline.push(releaseFenceTime);
 
@@ -412,7 +406,7 @@
     // Capture the old state of the layer for comparisons later
     const State& s(getDrawingState());
     const bool oldOpacity = isOpaque(s);
-    sp<GraphicBuffer> oldBuffer = getBE().compositionInfo.mBuffer;
+    sp<GraphicBuffer> oldBuffer = mActiveBuffer;
 
     if (!allTransactionsSignaled()) {
         mFlinger->signalLayerUpdate();
@@ -425,12 +419,10 @@
     // buffer mode.
     bool queuedBuffer = false;
     LayerRejecter r(mDrawingState, getCurrentState(), recomputeVisibleRegions,
-                    getProducerStickyTransform() != 0, mName.string(),
-                    mOverrideScalingMode, mFreezeGeometryUpdates);
-    status_t updateResult =
-            mConsumer->updateTexImage(&r, mFlinger->mPrimaryDispSync,
-                                                    &mAutoRefresh, &queuedBuffer,
-                                                    mLastFrameNumberReceived);
+                    getProducerStickyTransform() != 0, mName.string(), mOverrideScalingMode,
+                    mFreezeGeometryUpdates);
+    status_t updateResult = mConsumer->updateTexImage(&r, mFlinger->mPrimaryDispSync, &mAutoRefresh,
+                                                      &queuedBuffer, mLastFrameNumberReceived);
     if (updateResult == BufferQueue::PRESENT_LATER) {
         // Producer doesn't want buffer to be displayed yet.  Signal a
         // layer update so we check again at the next opportunity.
@@ -483,17 +475,16 @@
 
     // Decrement the queued-frames count.  Signal another event if we
     // have more frames pending.
-    if ((queuedBuffer && android_atomic_dec(&mQueuedFrames) > 1) ||
-        mAutoRefresh) {
+    if ((queuedBuffer && android_atomic_dec(&mQueuedFrames) > 1) || mAutoRefresh) {
         mFlinger->signalLayerUpdate();
     }
 
     // update the active buffer
-    getBE().compositionInfo.mBuffer =
-            mConsumer->getCurrentBuffer(&getBE().compositionInfo.mBufferSlot);
-    // replicated in LayerBE until FE/BE is ready to be synchronized
-    mActiveBuffer = getBE().compositionInfo.mBuffer;
-    if (getBE().compositionInfo.mBuffer == nullptr) {
+    mActiveBuffer = mConsumer->getCurrentBuffer(&mActiveBufferSlot);
+    getBE().compositionInfo.mBuffer = mActiveBuffer;
+    getBE().compositionInfo.mBufferSlot = mActiveBufferSlot;
+
+    if (mActiveBuffer == nullptr) {
         // this can only happen if the very first buffer was rejected.
         return outDirtyRegion;
     }
@@ -520,8 +511,7 @@
     Rect crop(mConsumer->getCurrentCrop());
     const uint32_t transform(mConsumer->getCurrentTransform());
     const uint32_t scalingMode(mConsumer->getCurrentScalingMode());
-    if ((crop != mCurrentCrop) ||
-        (transform != mCurrentTransform) ||
+    if ((crop != mCurrentCrop) || (transform != mCurrentTransform) ||
         (scalingMode != mCurrentScalingMode)) {
         mCurrentCrop = crop;
         mCurrentTransform = transform;
@@ -530,15 +520,14 @@
     }
 
     if (oldBuffer != nullptr) {
-        uint32_t bufWidth = getBE().compositionInfo.mBuffer->getWidth();
-        uint32_t bufHeight = getBE().compositionInfo.mBuffer->getHeight();
-        if (bufWidth != uint32_t(oldBuffer->width) ||
-            bufHeight != uint32_t(oldBuffer->height)) {
+        uint32_t bufWidth = mActiveBuffer->getWidth();
+        uint32_t bufHeight = mActiveBuffer->getHeight();
+        if (bufWidth != uint32_t(oldBuffer->width) || bufHeight != uint32_t(oldBuffer->height)) {
             recomputeVisibleRegions = true;
         }
     }
 
-    mCurrentOpacity = getOpacityForFormat(getBE().compositionInfo.mBuffer->format);
+    mCurrentOpacity = getOpacityForFormat(mActiveBuffer->format);
     if (oldOpacity != isOpaque(s)) {
         recomputeVisibleRegions = true;
     }
@@ -631,8 +620,8 @@
 
     uint32_t hwcSlot = 0;
     sp<GraphicBuffer> hwcBuffer;
-    hwcInfo.bufferCache.getHwcBuffer(getBE().compositionInfo.mBufferSlot,
-                                     getBE().compositionInfo.mBuffer, &hwcSlot, &hwcBuffer);
+    getBE().mHwcLayers[hwcId].bufferCache.getHwcBuffer(mActiveBufferSlot, mActiveBuffer, &hwcSlot,
+                                                       &hwcBuffer);
 
     auto acquireFence = mConsumer->getCurrentFence();
     error = hwcLayer->setBuffer(hwcSlot, hwcBuffer, acquireFence);
@@ -646,7 +635,7 @@
 bool BufferLayer::isOpaque(const Layer::State& s) const {
     // if we don't have a buffer or sidebandStream yet, we're translucent regardless of the
     // layer's opaque flag.
-    if ((getBE().compositionInfo.hwc.sidebandStream == nullptr) && (getBE().compositionInfo.mBuffer == nullptr)) {
+    if ((getBE().compositionInfo.hwc.sidebandStream == nullptr) && (mActiveBuffer == nullptr)) {
         return false;
     }
 
@@ -661,8 +650,7 @@
     sp<IGraphicBufferConsumer> consumer;
     BufferQueue::createBufferQueue(&producer, &consumer, true);
     mProducer = new MonitoredProducer(producer, mFlinger, this);
-    mConsumer = new BufferLayerConsumer(consumer,
-            mFlinger->getRenderEngine(), mTextureName, this);
+    mConsumer = new BufferLayerConsumer(consumer, mFlinger->getRenderEngine(), mTextureName, this);
     mConsumer->setConsumerUsageBits(getEffectiveUsage(0));
     mConsumer->setContentsChangedListener(this);
     mConsumer->setName(mName);
@@ -694,8 +682,7 @@
 
         // Ensure that callbacks are handled in order
         while (item.mFrameNumber != mLastFrameNumberReceived + 1) {
-            status_t result = mQueueItemCondition.waitRelative(mQueueItemLock,
-                                                               ms2ns(500));
+            status_t result = mQueueItemCondition.waitRelative(mQueueItemLock, ms2ns(500));
             if (result != NO_ERROR) {
                 ALOGE("[%s] Timed out waiting on callback", mName.string());
             }
@@ -718,8 +705,7 @@
 
         // Ensure that callbacks are handled in order
         while (item.mFrameNumber != mLastFrameNumberReceived + 1) {
-            status_t result = mQueueItemCondition.waitRelative(mQueueItemLock,
-                                                               ms2ns(500));
+            status_t result = mQueueItemCondition.waitRelative(mQueueItemLock, ms2ns(500));
             if (result != NO_ERROR) {
                 ALOGE("[%s] Timed out waiting on callback", mName.string());
             }
@@ -884,8 +870,7 @@
         // able to be latched. To avoid this, grab this buffer anyway.
         return true;
     }
-    return mQueueItems[0].mFenceTime->getSignalTime() !=
-            Fence::SIGNAL_TIME_PENDING;
+    return mQueueItems[0].mFenceTime->getSignalTime() != Fence::SIGNAL_TIME_PENDING;
 }
 
 uint32_t BufferLayer::getEffectiveScalingMode() const {
diff --git a/services/surfaceflinger/Layer.cpp b/services/surfaceflinger/Layer.cpp
index 78dd40b..0299270 100644
--- a/services/surfaceflinger/Layer.cpp
+++ b/services/surfaceflinger/Layer.cpp
@@ -1440,7 +1440,7 @@
     info.mMatrix[1][0] = ds.active.transform[1][0];
     info.mMatrix[1][1] = ds.active.transform[1][1];
     {
-        sp<const GraphicBuffer> buffer = getBE().compositionInfo.mBuffer;
+        sp<const GraphicBuffer> buffer = mActiveBuffer;
         if (buffer != 0) {
             info.mActiveBufferWidth = buffer->getWidth();
             info.mActiveBufferHeight = buffer->getHeight();
diff --git a/services/surfaceflinger/Layer.h b/services/surfaceflinger/Layer.h
index 3671a2b..b203a7b 100644
--- a/services/surfaceflinger/Layer.h
+++ b/services/surfaceflinger/Layer.h
@@ -42,6 +42,7 @@
 #include "MonitoredProducer.h"
 #include "SurfaceFlinger.h"
 #include "Transform.h"
+#include "LayerBE.h"
 
 #include <layerproto/LayerProtoHeader.h>
 #include "DisplayHardware/HWComposer.h"
@@ -68,68 +69,6 @@
 
 // ---------------------------------------------------------------------------
 
-struct CompositionInfo {
-    HWC2::Composition compositionType;
-    sp<GraphicBuffer> mBuffer = nullptr;
-    int mBufferSlot = BufferQueue::INVALID_BUFFER_SLOT;
-    struct {
-        HWComposer* hwc;
-        sp<Fence> fence;
-        HWC2::BlendMode blendMode;
-        Rect displayFrame;
-        float alpha;
-        FloatRect sourceCrop;
-        HWC2::Transform transform;
-        int z;
-        int type;
-        int appId;
-        Region visibleRegion;
-        Region surfaceDamage;
-        sp<NativeHandle> sidebandStream;
-        android_dataspace dataspace;
-        hwc_color_t color;
-    } hwc;
-    struct {
-        RE::RenderEngine* renderEngine;
-        Mesh* mesh;
-    } renderEngine;
-};
-
-class LayerBE {
-public:
-    LayerBE();
-
-    // The mesh used to draw the layer in GLES composition mode
-    Mesh mMesh;
-
-    // HWC items, accessed from the main thread
-    struct HWCInfo {
-        HWCInfo()
-              : hwc(nullptr),
-                layer(nullptr),
-                forceClientComposition(false),
-                compositionType(HWC2::Composition::Invalid),
-                clearClientTarget(false) {}
-
-        HWComposer* hwc;
-        HWC2::Layer* layer;
-        bool forceClientComposition;
-        HWC2::Composition compositionType;
-        bool clearClientTarget;
-        Rect displayFrame;
-        FloatRect sourceCrop;
-        HWComposerBufferCache bufferCache;
-    };
-
-    // A layer can be attached to multiple displays when operating in mirror mode
-    // (a.k.a: when several displays are attached with equal layerStack). In this
-    // case we need to keep track. In non-mirror mode, a layer will have only one
-    // HWCInfo. This map key is a display layerStack.
-    std::unordered_map<int32_t, HWCInfo> mHwcLayers;
-
-    CompositionInfo compositionInfo;
-};
-
 class Layer : public virtual RefBase {
     static int32_t sSequence;
 
diff --git a/services/surfaceflinger/LayerBE.h b/services/surfaceflinger/LayerBE.h
new file mode 100644
index 0000000..5965f23
--- /dev/null
+++ b/services/surfaceflinger/LayerBE.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <ui/Region.h>
+
+#include "SurfaceFlinger.h"
+
+#include "DisplayHardware/HWComposer.h"
+#include "DisplayHardware/HWComposerBufferCache.h"
+#include "RenderEngine/Mesh.h"
+
+namespace android {
+
+struct CompositionInfo {
+    HWC2::Composition compositionType;
+    sp<GraphicBuffer> mBuffer = nullptr;
+    int mBufferSlot = BufferQueue::INVALID_BUFFER_SLOT;
+    struct {
+        HWC2::Layer* hwcLayer;
+        int32_t hwid = -1;
+        sp<Fence> fence;
+        HWC2::BlendMode blendMode = HWC2::BlendMode::Invalid;
+        Rect displayFrame;
+        float alpha;
+        FloatRect sourceCrop;
+        HWC2::Transform transform = HWC2::Transform::None;
+        int z;
+        int type;
+        int appId;
+        Region visibleRegion;
+        Region surfaceDamage;
+        sp<NativeHandle> sidebandStream;
+        android_dataspace dataspace;
+        hwc_color_t color;
+    } hwc;
+    struct {
+        Mesh* mesh;
+    } renderEngine;
+};
+
+class LayerBE {
+public:
+    LayerBE();
+
+    // The mesh used to draw the layer in GLES composition mode
+    Mesh mMesh;
+
+    // HWC items, accessed from the main thread
+    struct HWCInfo {
+        HWCInfo()
+              : hwc(nullptr),
+                layer(nullptr),
+                forceClientComposition(false),
+                compositionType(HWC2::Composition::Invalid),
+                clearClientTarget(false) {}
+
+        HWComposer* hwc;
+        HWC2::Layer* layer;
+        bool forceClientComposition;
+        HWC2::Composition compositionType;
+        bool clearClientTarget;
+        Rect displayFrame;
+        FloatRect sourceCrop;
+        HWComposerBufferCache bufferCache;
+    };
+
+
+    // A layer can be attached to multiple displays when operating in mirror mode
+    // (a.k.a: when several displays are attached with equal layerStack). In this
+    // case we need to keep track. In non-mirror mode, a layer will have only one
+    // HWCInfo. This map key is a display layerStack.
+    std::unordered_map<int32_t, HWCInfo> mHwcLayers;
+
+    CompositionInfo compositionInfo;
+};
+
+}; // namespace android
+
diff --git a/services/surfaceflinger/SurfaceFlinger.h b/services/surfaceflinger/SurfaceFlinger.h
index 392acaa..bebd6a5 100644
--- a/services/surfaceflinger/SurfaceFlinger.h
+++ b/services/surfaceflinger/SurfaceFlinger.h
@@ -44,6 +44,7 @@
 #include <gui/LayerState.h>
 
 #include <gui/OccupancyTracker.h>
+#include <gui/BufferQueue.h>
 
 #include <hardware/hwcomposer_defs.h>
 
@@ -60,6 +61,7 @@
 #include "SurfaceInterceptor.h"
 #include "SurfaceTracing.h"
 #include "StartPropertySetThread.h"
+#include "LayerBE.h"
 
 #include "DisplayHardware/HWC2.h"
 #include "DisplayHardware/HWComposer.h"
diff --git a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
index fafc54e..21590df 100644
--- a/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
+++ b/services/surfaceflinger/tests/unittests/DisplayTransactionTest.cpp
@@ -165,9 +165,7 @@
 
     EXPECT_CALL(*mComposer, getActiveConfig(DisplayDevice::DISPLAY_PRIMARY, _))
             .WillOnce(DoAll(SetArgPointee<1>(0), Return(Error::NONE)));
-    EXPECT_CALL(*mComposer, getColorModes(DisplayDevice::DISPLAY_PRIMARY, _))
-            .WillOnce(DoAll(SetArgPointee<1>(std::vector<ColorMode>({ColorMode::NATIVE})),
-                            Return(Error::NONE)));
+    EXPECT_CALL(*mComposer, getColorModes(DisplayDevice::DISPLAY_PRIMARY, _)).Times(0);
     EXPECT_CALL(*mComposer, getHdrCapabilities(DisplayDevice::DISPLAY_PRIMARY, _, _, _, _))
             .WillOnce(DoAll(SetArgPointee<1>(std::vector<Hdr>()), Return(Error::NONE)));
 
diff --git a/vulkan/libvulkan/driver.cpp b/vulkan/libvulkan/driver.cpp
index a9d473d..dec39e0 100644
--- a/vulkan/libvulkan/driver.cpp
+++ b/vulkan/libvulkan/driver.cpp
@@ -407,6 +407,12 @@
     for (uint32_t i = 0; i < ext_count; i++)
         FilterExtension(ext_names[i]);
 
+    // Enable device extensions that contain physical-device commands, so that
+    // vkGetInstanceProcAddr will return those physical-device commands.
+    if (is_instance_) {
+        hook_extensions_.set(ProcHook::KHR_swapchain);
+    }
+
     ext_names = extension_filter_.names;
     ext_count = extension_filter_.name_count;
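
The added hook makes physical-device-level commands introduced by device extensions (here VK_KHR_swapchain) resolvable through vkGetInstanceProcAddr before any VkDevice exists. A minimal sketch of what that enables on the caller's side, assuming a valid VkInstance created elsewhere and an illustrative helper name:

    #include <vulkan/vulkan.h>

    // Resolves vkGetPhysicalDevicePresentRectanglesKHR, a physical-device-level
    // command defined by the VK_KHR_swapchain device extension, without creating
    // a VkDevice first. Returns nullptr if the driver does not expose it.
    static PFN_vkGetPhysicalDevicePresentRectanglesKHR LoadPresentRectangles(VkInstance instance) {
        return reinterpret_cast<PFN_vkGetPhysicalDevicePresentRectanglesKHR>(
                vkGetInstanceProcAddr(instance, "vkGetPhysicalDevicePresentRectanglesKHR"));
    }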