Merge changes I9e4cbf11,I41cde13a

* changes:
  trusty: Allow fuzzing without coverage
  trusty: Fix up error messages
diff --git a/NOTICE b/NOTICE
index 152be20..8e8a91c 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,324 +1,16 @@
-   =========================================================================
-   ==  NOTICE file corresponding to the section 4 d of                    ==
-   ==  the Apache License, Version 2.0,                                   ==
-   ==  in this case for the Android-specific code.                        ==
-   =========================================================================
+Copyright (C) 2017 The Android Open Source Project
 
-Android Code
-Copyright 2005-2008 The Android Open Source Project
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
 
-This product includes software developed as part of
-The Android Open Source Project (http://source.android.com).
+     http://www.apache.org/licenses/LICENSE-2.0
 
-   =========================================================================
-   ==  NOTICE file corresponding to the section 4 d of                    ==
-   ==  the Apache License, Version 2.0,                                   ==
-   ==  in this case for Apache Commons code.                              ==
-   =========================================================================
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
 
-Apache Commons
-Copyright 1999-2006 The Apache Software Foundation
+-------------------------------------------------------------------
 
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-   =========================================================================
-   ==  NOTICE file corresponding to the section 4 d of                    ==
-   ==  the Apache License, Version 2.0,                                   ==
-   ==  in this case for Jakarta Commons Logging.                          ==
-   =========================================================================
-
-Jakarta Commons Logging (JCL)
-Copyright 2005,2006 The Apache Software Foundation.
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
-
-   =========================================================================
-   ==  NOTICE file corresponding to the section 4 d of                    ==
-   ==  the Apache License, Version 2.0,                                   ==
-   ==  in this case for the Nuance code.                                  ==
-   =========================================================================
-
-These files are Copyright 2007 Nuance Communications, but released under
-the Apache2 License.
-
-   =========================================================================
-   ==  NOTICE file corresponding to the section 4 d of                    ==
-   ==  the Apache License, Version 2.0,                                   ==
-   ==  in this case for the Media Codecs code.                            ==
-   =========================================================================
-
-Media Codecs
-These files are Copyright 1998 - 2009 PacketVideo, but released under
-the Apache2 License.
-
-   =========================================================================
-   ==  NOTICE file corresponding to the section 4 d of                    ==
-   ==  the Apache License, Version 2.0,                                   ==
-   ==  in this case for the TagSoup code.                                 ==
-   =========================================================================
-
-This file is part of TagSoup and is Copyright 2002-2008 by John Cowan.
-
-TagSoup is licensed under the Apache License,
-Version 2.0.  You may obtain a copy of this license at
-http://www.apache.org/licenses/LICENSE-2.0 .  You may also have
-additional legal rights not granted by this license.
-
-TagSoup is distributed in the hope that it will be useful, but
-unless required by applicable law or agreed to in writing, TagSoup
-is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
-OF ANY KIND, either express or implied; not even the implied warranty
-of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-   =========================================================================
-   ==  NOTICE file corresponding to the section 4 d of                    ==
-   ==  the Apache License, Version 2.0,                                   ==
-   ==  in this case for Additional Codecs code.                           ==
-   =========================================================================
-
-Additional Codecs
-These files are Copyright 2003-2010 VisualOn, but released under
-the Apache2 License.
-
-  =========================================================================
-  ==  NOTICE file corresponding to the section 4 d of                    ==
-  ==  the Apache License, Version 2.0,                                   ==
-  ==  in this case for the Audio Effects code.                           ==
-  =========================================================================
-
-Audio Effects
-These files are Copyright (C) 2004-2010 NXP Software and
-Copyright (C) 2010 The Android Open Source Project, but released under
-the Apache2 License.
-
-
-                               Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-
-
-UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
-
-Unicode Data Files include all data files under the directories
-http://www.unicode.org/Public/, http://www.unicode.org/reports/,
-and http://www.unicode.org/cldr/data/ . Unicode Software includes any
-source code published in the Unicode Standard or under the directories
-http://www.unicode.org/Public/, http://www.unicode.org/reports/, and
-http://www.unicode.org/cldr/data/.
-
-NOTICE TO USER: Carefully read the following legal agreement. BY
-DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA
-FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY
-ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF
-THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY,
-DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE.
-
-COPYRIGHT AND PERMISSION NOTICE
-
-Copyright © 1991-2008 Unicode, Inc. All rights reserved. Distributed
-under the Terms of Use in http://www.unicode.org/copyright.html.
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of the Unicode data files and any associated documentation (the
-"Data Files") or Unicode software and any associated documentation (the
-"Software") to deal in the Data Files or Software without restriction,
-including without limitation the rights to use, copy, modify, merge,
-publish, distribute, and/or sell copies of the Data Files or Software,
-and to permit persons to whom the Data Files or Software are furnished to
-do so, provided that (a) the above copyright notice(s) and this permission
-notice appear with all copies of the Data Files or Software, (b) both the
-above copyright notice(s) and this permission notice appear in associated
-documentation, and (c) there is clear notice in each modified Data File
-or in the Software as well as in the documentation associated with the
-Data File(s) or Software that the data or software has been modified.
-
-THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
-OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
-INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
-OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
-OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
-OR PERFORMANCE OF THE DATA FILES OR SOFTWARE.
-
-Except as contained in this notice, the name of a copyright holder
-shall not be used in advertising or otherwise to promote the sale, use
-or other dealings in these Data Files or Software without prior written
-authorization of the copyright holder.
diff --git a/bootstat/Android.bp b/bootstat/Android.bp
index edff26d..5192f5e 100644
--- a/bootstat/Android.bp
+++ b/bootstat/Android.bp
@@ -93,4 +93,7 @@
         "boot_event_record_store_test.cpp",
         "testrunner.cpp",
     ],
+    test_options: {
+        unit_test: true,
+    },
 }
diff --git a/bootstat/AndroidTest.xml b/bootstat/AndroidTest.xml
deleted file mode 100644
index f3783fa..0000000
--- a/bootstat/AndroidTest.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!-- Copyright (C) 2017 The Android Open Source Project
-
-     Licensed under the Apache License, Version 2.0 (the "License");
-     you may not use this file except in compliance with the License.
-     You may obtain a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-     Unless required by applicable law or agreed to in writing, software
-     distributed under the License is distributed on an "AS IS" BASIS,
-     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     See the License for the specific language governing permissions and
-     limitations under the License.
--->
-<configuration description="Config for bootstat_tests">
-    <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
-        <option name="cleanup" value="true" />
-        <option name="push" value="bootstat_tests->/data/local/tmp/bootstat_tests" />
-    </target_preparer>
-    <option name="test-suite-tag" value="apct" />
-    <test class="com.android.tradefed.testtype.GTest" >
-        <option name="native-test-device-path" value="/data/local/tmp" />
-        <option name="module-name" value="bootstat_tests" />
-    </test>
-</configuration>
\ No newline at end of file
diff --git a/debuggerd/crash_dump.cpp b/debuggerd/crash_dump.cpp
index 51afcc2..68a43cf 100644
--- a/debuggerd/crash_dump.cpp
+++ b/debuggerd/crash_dump.cpp
@@ -153,14 +153,14 @@
   }
 
   struct timeval tv = {
-    .tv_sec = 1,
-    .tv_usec = 0,
+      .tv_sec = 1 * android::base::TimeoutMultiplier(),
+      .tv_usec = 0,
   };
   if (setsockopt(amfd.get(), SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) == -1) {
     PLOG(ERROR) << "failed to set send timeout on activity manager socket";
     return false;
   }
-  tv.tv_sec = 3;  // 3 seconds on handshake read
+  tv.tv_sec = 3 * android::base::TimeoutMultiplier();  // 3 seconds on handshake read
   if (setsockopt(amfd.get(), SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) == -1) {
     PLOG(ERROR) << "failed to set receive timeout on activity manager socket";
     return false;
@@ -447,7 +447,7 @@
   //
   // Note: processes with many threads and minidebug-info can take a bit to
   //       unwind, do not make this too small. b/62828735
-  alarm(30);
+  alarm(30 * android::base::TimeoutMultiplier());
 
   // Get the process name (aka cmdline).
   std::string process_name = get_process_name(g_target_thread);
diff --git a/debuggerd/debuggerd_test.cpp b/debuggerd/debuggerd_test.cpp
index b9d6606..65820bd 100644
--- a/debuggerd/debuggerd_test.cpp
+++ b/debuggerd/debuggerd_test.cpp
@@ -311,7 +311,7 @@
 
   if (mte_supported()) {
     // Test that the default TAGGED_ADDR_CTRL value is set.
-    ASSERT_MATCH(result, R"(tagged_addr_ctrl: 000000000007fff5)");
+    ASSERT_MATCH(result, R"(tagged_addr_ctrl: 000000000007fff3)");
   }
 }
 
diff --git a/debuggerd/libdebuggerd/test/tombstone_test.cpp b/debuggerd/libdebuggerd/test/tombstone_test.cpp
index b42d70c..7fe8f82 100644
--- a/debuggerd/libdebuggerd/test/tombstone_test.cpp
+++ b/debuggerd/libdebuggerd/test/tombstone_test.cpp
@@ -379,7 +379,7 @@
 TEST_F(TombstoneTest, gwp_asan_cause_uaf_exact) {
   gwp_asan::AllocationMetadata meta;
   meta.Addr = 0x1000;
-  meta.Size = 32;
+  meta.RequestedSize = 32;
 
   GwpAsanCrashDataTest crash_data(gwp_asan::Error::USE_AFTER_FREE, &meta);
   crash_data.SetCrashAddress(0x1000);
@@ -396,7 +396,7 @@
 TEST_F(TombstoneTest, gwp_asan_cause_double_free) {
   gwp_asan::AllocationMetadata meta;
   meta.Addr = 0x1000;
-  meta.Size = 32;
+  meta.RequestedSize = 32;
 
   GwpAsanCrashDataTest crash_data(gwp_asan::Error::DOUBLE_FREE, &meta);
   crash_data.SetCrashAddress(0x1000);
@@ -413,7 +413,7 @@
 TEST_F(TombstoneTest, gwp_asan_cause_overflow) {
   gwp_asan::AllocationMetadata meta;
   meta.Addr = 0x1000;
-  meta.Size = 32;
+  meta.RequestedSize = 32;
 
   GwpAsanCrashDataTest crash_data(gwp_asan::Error::BUFFER_OVERFLOW, &meta);
   crash_data.SetCrashAddress(0x1025);
@@ -432,7 +432,7 @@
 TEST_F(TombstoneTest, gwp_asan_cause_underflow) {
   gwp_asan::AllocationMetadata meta;
   meta.Addr = 0x1000;
-  meta.Size = 32;
+  meta.RequestedSize = 32;
 
   GwpAsanCrashDataTest crash_data(gwp_asan::Error::BUFFER_UNDERFLOW, &meta);
   crash_data.SetCrashAddress(0xffe);
@@ -451,7 +451,7 @@
 TEST_F(TombstoneTest, gwp_asan_cause_invalid_free_inside) {
   gwp_asan::AllocationMetadata meta;
   meta.Addr = 0x1000;
-  meta.Size = 32;
+  meta.RequestedSize = 32;
 
   GwpAsanCrashDataTest crash_data(gwp_asan::Error::INVALID_FREE, &meta);
   crash_data.SetCrashAddress(0x1001);
@@ -470,7 +470,7 @@
 TEST_F(TombstoneTest, gwp_asan_cause_invalid_free_outside) {
   gwp_asan::AllocationMetadata meta;
   meta.Addr = 0x1000;
-  meta.Size = 32;
+  meta.RequestedSize = 32;
 
   GwpAsanCrashDataTest crash_data(gwp_asan::Error::INVALID_FREE, &meta);
   crash_data.SetCrashAddress(0x1021);
diff --git a/debuggerd/proto/Android.bp b/debuggerd/proto/Android.bp
index 5307d50..bb82f03 100644
--- a/debuggerd/proto/Android.bp
+++ b/debuggerd/proto/Android.bp
@@ -1,4 +1,9 @@
-cc_library {
+filegroup {
+    name: "libtombstone_proto-src",
+    srcs: ["tombstone.proto"],
+}
+
+cc_library_static {
     name: "libtombstone_proto",
     cflags: [
         "-Wall",
@@ -14,9 +19,11 @@
         type: "lite",
     },
 
-    srcs: [
-        "tombstone.proto",
-    ],
+    srcs: [":libtombstone_proto-src"],
+
+    // b/155341058: Soong doesn't automatically add libprotobuf if there aren't any explicitly
+    // listed protos in srcs.
+    static_libs: ["libprotobuf-cpp-lite"],
 
     stl: "libc++_static",
     apex_available: [
diff --git a/debuggerd/proto/tombstone.proto b/debuggerd/proto/tombstone.proto
index aff50bd..38a06f4 100644
--- a/debuggerd/proto/tombstone.proto
+++ b/debuggerd/proto/tombstone.proto
@@ -1,5 +1,8 @@
 syntax = "proto3";
 
+option java_package = "com.android.server.os";
+option java_outer_classname = "TombstoneProtos";
+
 message Tombstone {
   Architecture arch = 1;
   string build_fingerprint = 2;
diff --git a/debuggerd/tombstoned/intercept_manager.cpp b/debuggerd/tombstoned/intercept_manager.cpp
index 437639e..4d4646a 100644
--- a/debuggerd/tombstoned/intercept_manager.cpp
+++ b/debuggerd/tombstoned/intercept_manager.cpp
@@ -26,6 +26,7 @@
 
 #include <android-base/cmsg.h>
 #include <android-base/logging.h>
+#include <android-base/properties.h>
 #include <android-base/unique_fd.h>
 
 #include "protocol.h"
@@ -162,7 +163,7 @@
     event_assign(intercept->intercept_event, intercept_manager->base, sockfd, EV_READ | EV_TIMEOUT,
                  intercept_close_cb, arg);
 
-    struct timeval timeout = { .tv_sec = 10, .tv_usec = 0 };
+    struct timeval timeout = {.tv_sec = 10 * android::base::TimeoutMultiplier(), .tv_usec = 0};
     event_add(intercept->intercept_event, &timeout);
   }
 
@@ -178,7 +179,7 @@
   intercept->intercept_manager = static_cast<InterceptManager*>(arg);
   intercept->sockfd.reset(sockfd);
 
-  struct timeval timeout = { 1, 0 };
+  struct timeval timeout = {1 * android::base::TimeoutMultiplier(), 0};
   event_base* base = evconnlistener_get_base(listener);
   event* intercept_event =
     event_new(base, sockfd, EV_TIMEOUT | EV_READ, intercept_request_cb, intercept);
diff --git a/debuggerd/tombstoned/tombstoned.cpp b/debuggerd/tombstoned/tombstoned.cpp
index f057260..bc2d33d 100644
--- a/debuggerd/tombstoned/tombstoned.cpp
+++ b/debuggerd/tombstoned/tombstoned.cpp
@@ -143,13 +143,13 @@
     CrashArtifact result;
 
     std::optional<std::string> path;
-    result.fd.reset(openat(dir_fd_, ".", O_WRONLY | O_APPEND | O_TMPFILE | O_CLOEXEC, 0640));
+    result.fd.reset(openat(dir_fd_, ".", O_WRONLY | O_APPEND | O_TMPFILE | O_CLOEXEC, 0660));
     if (result.fd == -1) {
       // We might not have O_TMPFILE. Try creating with an arbitrary filename instead.
       static size_t counter = 0;
       std::string tmp_filename = StringPrintf(".temporary%zu", counter++);
       result.fd.reset(openat(dir_fd_, tmp_filename.c_str(),
-                             O_WRONLY | O_APPEND | O_CREAT | O_TRUNC | O_CLOEXEC, 0640));
+                             O_WRONLY | O_APPEND | O_CREAT | O_TRUNC | O_CLOEXEC, 0660));
       if (result.fd == -1) {
         PLOG(FATAL) << "failed to create temporary tombstone in " << dir_path_;
       }
@@ -320,7 +320,7 @@
   }
 
   // TODO: Make this configurable by the interceptor?
-  struct timeval timeout = {10, 0};
+  struct timeval timeout = {10 * android::base::TimeoutMultiplier(), 0};
 
   event_base* base = event_get_base(crash->crash_event);
 
@@ -340,7 +340,7 @@
 
   // TODO: Make sure that only java crashes come in on the java socket
   // and only native crashes on the native socket.
-  struct timeval timeout = { 1, 0 };
+  struct timeval timeout = {1 * android::base::TimeoutMultiplier(), 0};
   event* crash_event = event_new(base, sockfd, EV_TIMEOUT | EV_READ, crash_request_cb, crash);
   crash->crash_socket_fd.reset(sockfd);
   crash->crash_event = crash_event;
@@ -408,12 +408,21 @@
   }
 }
 
-static bool link_fd(borrowed_fd fd, borrowed_fd dirfd, const std::string& path) {
-  std::string fd_path = StringPrintf("/proc/self/fd/%d", fd.get());
+static bool rename_tombstone_fd(borrowed_fd fd, borrowed_fd dirfd, const std::string& path) {
+  // Always try to unlink the tombstone file.
+  // linkat doesn't let us replace a file, so we need to unlink before linking
+  // our results onto disk, and if we fail for some reason, we should delete
+  // stale tombstones to avoid confusing inconsistency.
+  int rc = unlinkat(dirfd.get(), path.c_str(), 0);
+  if (rc != 0 && errno != ENOENT) {
+    PLOG(ERROR) << "failed to unlink tombstone at " << path;
+    return false;
+  }
 
-  int rc = linkat(AT_FDCWD, fd_path.c_str(), dirfd.get(), path.c_str(), AT_SYMLINK_FOLLOW);
+  std::string fd_path = StringPrintf("/proc/self/fd/%d", fd.get());
+  rc = linkat(AT_FDCWD, fd_path.c_str(), dirfd.get(), path.c_str(), AT_SYMLINK_FOLLOW);
   if (rc != 0) {
-    PLOG(ERROR) << "failed to link file descriptor";
+    PLOG(ERROR) << "failed to link tombstone at " << path;
     return false;
   }
   return true;
@@ -446,36 +455,22 @@
 
   CrashArtifactPaths paths = queue->get_next_artifact_paths();
 
-  // Always try to unlink the tombstone file.
-  // linkat doesn't let us replace a file, so we need to unlink before linking
-  // our results onto disk, and if we fail for some reason, we should delete
-  // stale tombstones to avoid confusing inconsistency.
-  rc = unlinkat(queue->dir_fd().get(), paths.text.c_str(), 0);
-  if (rc != 0 && errno != ENOENT) {
-    PLOG(ERROR) << "failed to unlink tombstone at " << paths.text;
-    return;
-  }
-
-  if (crash->output.text.fd != -1) {
-    if (!link_fd(crash->output.text.fd, queue->dir_fd(), paths.text)) {
-      LOG(ERROR) << "failed to link tombstone";
+  if (rename_tombstone_fd(crash->output.text.fd, queue->dir_fd(), paths.text)) {
+    if (crash->crash_type == kDebuggerdJavaBacktrace) {
+      LOG(ERROR) << "Traces for pid " << crash->crash_pid << " written to: " << paths.text;
     } else {
-      if (crash->crash_type == kDebuggerdJavaBacktrace) {
-        LOG(ERROR) << "Traces for pid " << crash->crash_pid << " written to: " << paths.text;
-      } else {
-        // NOTE: Several tools parse this log message to figure out where the
-        // tombstone associated with a given native crash was written. Any changes
-        // to this message must be carefully considered.
-        LOG(ERROR) << "Tombstone written to: " << paths.text;
-      }
+      // NOTE: Several tools parse this log message to figure out where the
+      // tombstone associated with a given native crash was written. Any changes
+      // to this message must be carefully considered.
+      LOG(ERROR) << "Tombstone written to: " << paths.text;
     }
   }
 
   if (crash->output.proto && crash->output.proto->fd != -1) {
     if (!paths.proto) {
       LOG(ERROR) << "missing path for proto tombstone";
-    } else if (!link_fd(crash->output.proto->fd, queue->dir_fd(), *paths.proto)) {
-      LOG(ERROR) << "failed to link proto tombstone";
+    } else {
+      rename_tombstone_fd(crash->output.proto->fd, queue->dir_fd(), *paths.proto);
     }
   }
 
@@ -509,7 +504,7 @@
 }
 
 int main(int, char* []) {
-  umask(0137);
+  umask(0117);
 
   // Don't try to connect to ourselves if we crash.
   struct sigaction action = {};
diff --git a/fastboot/device/commands.cpp b/fastboot/device/commands.cpp
index 2b2a0bf..b2b6a9e 100644
--- a/fastboot/device/commands.cpp
+++ b/fastboot/device/commands.cpp
@@ -659,7 +659,7 @@
             return device->WriteFail("No snapshot merge is in progress");
         }
 
-        auto sm = SnapshotManager::NewForFirstStageMount();
+        auto sm = SnapshotManager::New();
         if (!sm) {
             return device->WriteFail("Unable to create SnapshotManager");
         }
diff --git a/fs_mgr/libfs_avb/fs_avb.cpp b/fs_mgr/libfs_avb/fs_avb.cpp
index 49333a1..1da7117 100644
--- a/fs_mgr/libfs_avb/fs_avb.cpp
+++ b/fs_mgr/libfs_avb/fs_avb.cpp
@@ -433,6 +433,16 @@
     // Sets the MAJOR.MINOR for init to set it into "ro.boot.avb_version".
     avb_handle->avb_version_ = StringPrintf("%d.%d", AVB_VERSION_MAJOR, AVB_VERSION_MINOR);
 
+    // Verifies vbmeta structs against the digest passed from bootloader in kernel cmdline.
+    std::unique_ptr<AvbVerifier> avb_verifier = AvbVerifier::Create();
+    if (!avb_verifier || !avb_verifier->VerifyVbmetaImages(avb_handle->vbmeta_images_)) {
+        LERROR << "Failed to verify vbmeta digest";
+        if (!allow_verification_error) {
+            LERROR << "vbmeta digest error isn't allowed ";
+            return nullptr;
+        }
+    }
+
     // Checks whether FLAGS_VERIFICATION_DISABLED is set:
     //   - Only the top-level vbmeta struct is read.
     //   - vbmeta struct in other partitions are NOT processed, including AVB HASH descriptor(s)
@@ -443,26 +453,16 @@
     bool verification_disabled = ((AvbVBMetaImageFlags)vbmeta_header.flags &
                                   AVB_VBMETA_IMAGE_FLAGS_VERIFICATION_DISABLED);
 
+    // Checks whether FLAGS_HASHTREE_DISABLED is set.
+    //   - vbmeta struct in all partitions are still processed, just disable
+    //     dm-verity in the user space.
+    bool hashtree_disabled =
+            ((AvbVBMetaImageFlags)vbmeta_header.flags & AVB_VBMETA_IMAGE_FLAGS_HASHTREE_DISABLED);
+
     if (verification_disabled) {
         avb_handle->status_ = AvbHandleStatus::kVerificationDisabled;
-    } else {
-        // Verifies vbmeta structs against the digest passed from bootloader in kernel cmdline.
-        std::unique_ptr<AvbVerifier> avb_verifier = AvbVerifier::Create();
-        if (!avb_verifier) {
-            LERROR << "Failed to create AvbVerifier";
-            return nullptr;
-        }
-        if (!avb_verifier->VerifyVbmetaImages(avb_handle->vbmeta_images_)) {
-            LERROR << "VerifyVbmetaImages failed";
-            return nullptr;
-        }
-
-        // Checks whether FLAGS_HASHTREE_DISABLED is set.
-        bool hashtree_disabled = ((AvbVBMetaImageFlags)vbmeta_header.flags &
-                                  AVB_VBMETA_IMAGE_FLAGS_HASHTREE_DISABLED);
-        if (hashtree_disabled) {
-            avb_handle->status_ = AvbHandleStatus::kHashtreeDisabled;
-        }
+    } else if (hashtree_disabled) {
+        avb_handle->status_ = AvbHandleStatus::kHashtreeDisabled;
     }
 
     LINFO << "Returning avb_handle with status: " << avb_handle->status_;
diff --git a/fs_mgr/libsnapshot/Android.bp b/fs_mgr/libsnapshot/Android.bp
index d36a7f0..678adf8 100644
--- a/fs_mgr/libsnapshot/Android.bp
+++ b/fs_mgr/libsnapshot/Android.bp
@@ -549,6 +549,7 @@
     ],
     srcs: [
         "cow_snapuserd_test.cpp",
+        "snapuserd.cpp",
     ],
     cflags: [
         "-Wall",
diff --git a/fs_mgr/libsnapshot/android/snapshot/snapshot.proto b/fs_mgr/libsnapshot/android/snapshot/snapshot.proto
index 36e1169..42bff14 100644
--- a/fs_mgr/libsnapshot/android/snapshot/snapshot.proto
+++ b/fs_mgr/libsnapshot/android/snapshot/snapshot.proto
@@ -156,7 +156,7 @@
     MergePhase merge_phase = 6;
 }
 
-// Next: 4
+// Next: 5
 message SnapshotMergeReport {
     // Status of the update after the merge attempts.
     UpdateState state = 1;
@@ -167,4 +167,7 @@
 
     // Total size of all the COW images before the update.
     uint64 cow_file_size = 3;
+
+    // Whether compression/dm-user was used for any snapshots.
+    bool compression_enabled = 4;
 }
diff --git a/fs_mgr/libsnapshot/cow_reader.cpp b/fs_mgr/libsnapshot/cow_reader.cpp
index c15a05b..75c05d1 100644
--- a/fs_mgr/libsnapshot/cow_reader.cpp
+++ b/fs_mgr/libsnapshot/cow_reader.cpp
@@ -181,6 +181,7 @@
         ops_buffer->resize(current_op_num);
     }
 
+    LOG(DEBUG) << "COW file read complete. Total ops: " << ops_buffer->size();
     // To successfully parse a COW file, we need either:
     //  (1) a label to read up to, and for that label to be found, or
     //  (2) a valid footer.
@@ -298,10 +299,9 @@
     // are contiguous. These are monotonically increasing numbers.
     //
     // When both (1) and (2) are true, kernel will batch merge the operations.
-    // However, we do not want copy operations to be batch merged as
-    // a crash or system reboot during an overlapping copy can drive the device
-    // to a corrupted state. Hence, merging of copy operations should always be
-    // done as a individual 4k block. In the above case, since the
+    // In the above case, we have to ensure that the copy operations
+    // are merged before the replace operations are done. Hence,
+    // we will not change the order of the copy operations. Since the
     // cow_op->new_block numbers are contiguous, we will ensure that the
     // cow block numbers assigned in ReadMetadata() for these respective copy
     // operations are not contiguous forcing kernel to issue merge for each
@@ -328,10 +328,8 @@
     //
     // Merge sequence will look like:
     //
-    // Merge-1 - Copy-op-1
-    // Merge-2 - Copy-op-2
-    // Merge-3 - Copy-op-3
-    // Merge-4 - Batch-merge {Replace-op-7, Replace-op-6, Zero-op-8,
+    // Merge-1 - Batch-merge { Copy-op-1, Copy-op-2, Copy-op-3 }
+    // Merge-2 - Batch-merge {Replace-op-7, Replace-op-6, Zero-op-8,
     //                        Replace-op-4, Zero-op-9, Replace-op-5 }
     //==============================================================
 
diff --git a/fs_mgr/libsnapshot/cow_snapuserd_test.cpp b/fs_mgr/libsnapshot/cow_snapuserd_test.cpp
index 7fa23db..045d9db 100644
--- a/fs_mgr/libsnapshot/cow_snapuserd_test.cpp
+++ b/fs_mgr/libsnapshot/cow_snapuserd_test.cpp
@@ -36,6 +36,8 @@
 #include <libsnapshot/snapuserd_client.h>
 #include <storage_literals/storage_literals.h>
 
+#include "snapuserd.h"
+
 namespace android {
 namespace snapshot {
 
@@ -119,7 +121,6 @@
     void CreateDmUserDevice();
     void StartSnapuserdDaemon();
     void CreateSnapshotDevice();
-    unique_fd CreateTempFile(const std::string& name, size_t size);
 
     unique_ptr<LoopDevice> base_loop_;
     unique_ptr<TempDevice> dmuser_dev_;
@@ -140,7 +141,24 @@
     int total_base_size_;
 };
 
-unique_fd CowSnapuserdTest::CreateTempFile(const std::string& name, size_t size) {
+class CowSnapuserdMetadataTest final {
+  public:
+    void Setup();
+    void SetupPartialArea();
+    void ValidateMetadata();
+    void ValidatePartialFilledArea();
+
+  private:
+    void InitMetadata();
+    void CreateCowDevice();
+    void CreateCowPartialFilledArea();
+
+    std::unique_ptr<Snapuserd> snapuserd_;
+    std::unique_ptr<TemporaryFile> cow_system_;
+    size_t size_ = 1_MiB;
+};
+
+static unique_fd CreateTempFile(const std::string& name, size_t size) {
     unique_fd fd(syscall(__NR_memfd_create, name.c_str(), MFD_ALLOW_SEALING));
     if (fd < 0) {
         return {};
@@ -430,25 +448,299 @@
 }
 
 void CowSnapuserdTest::MergeInterrupt() {
+    // Interrupt merge at various intervals
     StartMerge();
-    std::this_thread::sleep_for(4s);
+    std::this_thread::sleep_for(250ms);
     SimulateDaemonRestart();
 
     StartMerge();
-    std::this_thread::sleep_for(3s);
+    std::this_thread::sleep_for(250ms);
     SimulateDaemonRestart();
 
     StartMerge();
-    std::this_thread::sleep_for(3s);
+    std::this_thread::sleep_for(150ms);
     SimulateDaemonRestart();
 
     StartMerge();
-    std::this_thread::sleep_for(1s);
+    std::this_thread::sleep_for(100ms);
+    SimulateDaemonRestart();
+
+    StartMerge();
+    std::this_thread::sleep_for(800ms);
+    SimulateDaemonRestart();
+
+    StartMerge();
+    std::this_thread::sleep_for(600ms);
     SimulateDaemonRestart();
 
     ASSERT_TRUE(Merge());
 }
 
+void CowSnapuserdMetadataTest::CreateCowPartialFilledArea() {
+    std::string path = android::base::GetExecutableDirectory();
+    cow_system_ = std::make_unique<TemporaryFile>(path);
+
+    CowOptions options;
+    options.compression = "gz";
+    CowWriter writer(options);
+
+    ASSERT_TRUE(writer.Initialize(cow_system_->fd));
+
+    // Area 0 is completely filled with 256 exceptions
+    for (int i = 0; i < 256; i++) {
+        ASSERT_TRUE(writer.AddCopy(i, 256 + i));
+    }
+
+    // Area 1 is partially filled with 2 copy ops and 10 zero ops
+    ASSERT_TRUE(writer.AddCopy(500, 1000));
+    ASSERT_TRUE(writer.AddCopy(501, 1001));
+
+    ASSERT_TRUE(writer.AddZeroBlocks(300, 10));
+
+    // Flush operations
+    ASSERT_TRUE(writer.Finalize());
+}
+
+void CowSnapuserdMetadataTest::ValidatePartialFilledArea() {
+    int area_sz = snapuserd_->GetMetadataAreaSize();
+
+    ASSERT_EQ(area_sz, 2);
+
+    size_t new_chunk = 263;
+    // Verify the partially filled area
+    void* buffer = snapuserd_->GetExceptionBuffer(1);
+    loff_t offset = 0;
+    struct disk_exception* de;
+    for (int i = 0; i < 12; i++) {
+        de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+        ASSERT_EQ(de->old_chunk, i);
+        ASSERT_EQ(de->new_chunk, new_chunk);
+        offset += sizeof(struct disk_exception);
+        new_chunk += 1;
+    }
+
+    de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+    ASSERT_EQ(de->old_chunk, 0);
+    ASSERT_EQ(de->new_chunk, 0);
+}
+
+void CowSnapuserdMetadataTest::SetupPartialArea() {
+    CreateCowPartialFilledArea();
+    InitMetadata();
+}
+
+void CowSnapuserdMetadataTest::CreateCowDevice() {
+    unique_fd rnd_fd;
+    loff_t offset = 0;
+
+    std::string path = android::base::GetExecutableDirectory();
+    cow_system_ = std::make_unique<TemporaryFile>(path);
+
+    rnd_fd.reset(open("/dev/random", O_RDONLY));
+    ASSERT_TRUE(rnd_fd > 0);
+
+    std::unique_ptr<uint8_t[]> random_buffer_1_ = std::make_unique<uint8_t[]>(size_);
+
+    // Fill random data
+    for (size_t j = 0; j < (size_ / 1_MiB); j++) {
+        ASSERT_EQ(ReadFullyAtOffset(rnd_fd, (char*)random_buffer_1_.get() + offset, 1_MiB, 0),
+                  true);
+
+        offset += 1_MiB;
+    }
+
+    CowOptions options;
+    options.compression = "gz";
+    CowWriter writer(options);
+
+    ASSERT_TRUE(writer.Initialize(cow_system_->fd));
+
+    size_t num_blocks = size_ / options.block_size;
+
+    // Overlapping region. This has to be split
+    // into two batch operations
+    ASSERT_TRUE(writer.AddCopy(23, 20));
+    ASSERT_TRUE(writer.AddCopy(22, 19));
+    ASSERT_TRUE(writer.AddCopy(21, 18));
+    ASSERT_TRUE(writer.AddCopy(20, 17));
+    ASSERT_TRUE(writer.AddCopy(19, 16));
+    ASSERT_TRUE(writer.AddCopy(18, 15));
+
+    // Contiguous region with blocks in ascending order.
+    // The daemon has to ensure that these blocks are merged
+    // in a batch
+    ASSERT_TRUE(writer.AddCopy(50, 75));
+    ASSERT_TRUE(writer.AddCopy(51, 76));
+    ASSERT_TRUE(writer.AddCopy(52, 77));
+    ASSERT_TRUE(writer.AddCopy(53, 78));
+
+    // Discontiguous region
+    ASSERT_TRUE(writer.AddCopy(110, 130));
+    ASSERT_TRUE(writer.AddCopy(105, 125));
+    ASSERT_TRUE(writer.AddCopy(100, 120));
+
+    // Overlap
+    ASSERT_TRUE(writer.AddCopy(25, 30));
+    ASSERT_TRUE(writer.AddCopy(30, 31));
+
+    size_t source_blk = num_blocks;
+
+    ASSERT_TRUE(writer.AddRawBlocks(source_blk, random_buffer_1_.get(), size_));
+
+    size_t blk_zero_copy_start = source_blk + num_blocks;
+
+    ASSERT_TRUE(writer.AddZeroBlocks(blk_zero_copy_start, num_blocks));
+
+    // Flush operations
+    ASSERT_TRUE(writer.Finalize());
+}
+
+void CowSnapuserdMetadataTest::InitMetadata() {
+    snapuserd_ = std::make_unique<Snapuserd>("", cow_system_->path, "");
+    ASSERT_TRUE(snapuserd_->InitCowDevice());
+}
+
+void CowSnapuserdMetadataTest::Setup() {
+    CreateCowDevice();
+    InitMetadata();
+}
+
+void CowSnapuserdMetadataTest::ValidateMetadata() {
+    int area_sz = snapuserd_->GetMetadataAreaSize();
+    ASSERT_EQ(area_sz, 3);
+
+    size_t old_chunk;
+    size_t new_chunk;
+
+    for (int i = 0; i < area_sz; i++) {
+        void* buffer = snapuserd_->GetExceptionBuffer(i);
+        loff_t offset = 0;
+        if (i == 0) {
+            old_chunk = 256;
+            new_chunk = 2;
+        } else if (i == 1) {
+            old_chunk = 512;
+            new_chunk = 259;
+        }
+        for (int j = 0; j < 256; j++) {
+            struct disk_exception* de =
+                    reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+
+            if (i != 2) {
+                ASSERT_EQ(de->old_chunk, old_chunk);
+                ASSERT_EQ(de->new_chunk, new_chunk);
+                old_chunk += 1;
+                new_chunk += 1;
+            } else {
+                break;
+            }
+            offset += sizeof(struct disk_exception);
+        }
+
+        if (i == 2) {
+            // The first 5 copy operations are not batch merged
+            // as the sequence is discontiguous
+            struct disk_exception* de =
+                    reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 30);
+            ASSERT_EQ(de->new_chunk, 518);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 25);
+            ASSERT_EQ(de->new_chunk, 520);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 100);
+            ASSERT_EQ(de->new_chunk, 522);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 105);
+            ASSERT_EQ(de->new_chunk, 524);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 110);
+            ASSERT_EQ(de->new_chunk, 526);
+            offset += sizeof(struct disk_exception);
+
+            // The next 4 operations are batch merged as
+            // both old and new chunk are contiguous
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 50);
+            ASSERT_EQ(de->new_chunk, 528);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 51);
+            ASSERT_EQ(de->new_chunk, 529);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 52);
+            ASSERT_EQ(de->new_chunk, 530);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 53);
+            ASSERT_EQ(de->new_chunk, 531);
+            offset += sizeof(struct disk_exception);
+
+            // This handles the overlapping copy region, which is
+            // split into two batch merge operations.
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 18);
+            ASSERT_EQ(de->new_chunk, 533);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 19);
+            ASSERT_EQ(de->new_chunk, 534);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 20);
+            ASSERT_EQ(de->new_chunk, 535);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 21);
+            ASSERT_EQ(de->new_chunk, 537);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 22);
+            ASSERT_EQ(de->new_chunk, 538);
+            offset += sizeof(struct disk_exception);
+
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 23);
+            ASSERT_EQ(de->new_chunk, 539);
+            offset += sizeof(struct disk_exception);
+
+            // End of metadata
+            de = reinterpret_cast<struct disk_exception*>((char*)buffer + offset);
+            ASSERT_EQ(de->old_chunk, 0);
+            ASSERT_EQ(de->new_chunk, 0);
+            offset += sizeof(struct disk_exception);
+        }
+    }
+}
+
+TEST(Snapuserd_Test, Snapshot_Metadata) {
+    CowSnapuserdMetadataTest harness;
+    harness.Setup();
+    harness.ValidateMetadata();
+}
+
+TEST(Snapuserd_Test, Snapshot_Metadata_Overlap) {
+    CowSnapuserdMetadataTest harness;
+    harness.SetupPartialArea();
+    harness.ValidatePartialFilledArea();
+}
+
 TEST(Snapuserd_Test, Snapshot_Merge_Resume) {
     CowSnapuserdTest harness;
     ASSERT_TRUE(harness.Setup());
@@ -457,7 +749,7 @@
     harness.Shutdown();
 }
 
-TEST(Snapuserd_Test, Snapshot) {
+TEST(Snapuserd_Test, Snapshot_IO_TEST) {
     CowSnapuserdTest harness;
     ASSERT_TRUE(harness.Setup());
     harness.ReadSnapshotDeviceAndValidate();
@@ -465,7 +757,6 @@
     harness.ValidateMerge();
     harness.Shutdown();
 }
-
 }  // namespace snapshot
 }  // namespace android
 
diff --git a/fs_mgr/libsnapshot/cow_writer.cpp b/fs_mgr/libsnapshot/cow_writer.cpp
index c1a5f32..81edc79 100644
--- a/fs_mgr/libsnapshot/cow_writer.cpp
+++ b/fs_mgr/libsnapshot/cow_writer.cpp
@@ -491,7 +491,7 @@
     return true;
 }
 
-bool CowWriter::CommitMerge(int merged_ops, bool sync) {
+bool CowWriter::CommitMerge(int merged_ops) {
     CHECK(merge_in_progress_);
     header_.num_merge_ops += merged_ops;
 
@@ -506,11 +506,7 @@
         return false;
     }
 
-    // Sync only for merging of copy operations.
-    if (sync) {
-        return Sync();
-    }
-    return true;
+    return Sync();
 }
 
 bool CowWriter::Truncate(off_t length) {
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h b/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h
index 22ddfa6..6ffd5d8 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/cow_writer.h
@@ -101,7 +101,7 @@
     bool InitializeAppend(android::base::borrowed_fd fd, uint64_t label);
 
     void InitializeMerge(android::base::borrowed_fd fd, CowHeader* header);
-    bool CommitMerge(int merged_ops, bool sync);
+    bool CommitMerge(int merged_ops);
 
     bool Finalize() override;
 
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/mock_snapshot.h b/fs_mgr/libsnapshot/include/libsnapshot/mock_snapshot.h
index 92e7910..1e420cb 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/mock_snapshot.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/mock_snapshot.h
@@ -32,6 +32,7 @@
                 (const std::function<bool()>& callback, const std::function<bool()>& before_cancel),
                 (override));
     MOCK_METHOD(UpdateState, GetUpdateState, (double* progress), (override));
+    MOCK_METHOD(bool, UpdateUsesCompression, (), (override));
     MOCK_METHOD(Return, CreateUpdateSnapshots,
                 (const chromeos_update_engine::DeltaArchiveManifest& manifest), (override));
     MOCK_METHOD(bool, MapUpdateSnapshot,
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h b/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h
index ff7a727..0d90f6c 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/snapshot.h
@@ -170,6 +170,10 @@
     //   Other: 0
     virtual UpdateState GetUpdateState(double* progress = nullptr) = 0;
 
+    // Returns true if compression is enabled for the current update. This always returns false if
+    // UpdateState is None, or no snapshots have been created.
+    virtual bool UpdateUsesCompression() = 0;
+
     // Create necessary COW device / files for OTA clients. New logical partitions will be added to
     // group "cow" in target_metadata. Regions of partitions of current_metadata will be
     // "write-protected" and snapshotted.
@@ -326,6 +330,7 @@
     UpdateState ProcessUpdateState(const std::function<bool()>& callback = {},
                                    const std::function<bool()>& before_cancel = {}) override;
     UpdateState GetUpdateState(double* progress = nullptr) override;
+    bool UpdateUsesCompression() override;
     Return CreateUpdateSnapshots(const DeltaArchiveManifest& manifest) override;
     bool MapUpdateSnapshot(const CreateLogicalPartitionParams& params,
                            std::string* snapshot_path) override;
@@ -720,6 +725,9 @@
 
     SnapuserdClient* snapuserd_client() const { return snapuserd_client_.get(); }
 
+    // Helper for UpdateUsesCompression().
+    bool UpdateUsesCompression(LockedFile* lock);
+
     std::string gsid_dir_;
     std::string metadata_dir_;
     std::unique_ptr<IDeviceInfo> device_;
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/snapshot_stats.h b/fs_mgr/libsnapshot/include/libsnapshot/snapshot_stats.h
index d691d4f..96d2deb 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/snapshot_stats.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/snapshot_stats.h
@@ -28,7 +28,7 @@
     virtual ~ISnapshotMergeStats() = default;
     // Called when merge starts or resumes.
     virtual bool Start() = 0;
-    virtual void set_state(android::snapshot::UpdateState state) = 0;
+    virtual void set_state(android::snapshot::UpdateState state, bool using_compression) = 0;
     virtual void set_cow_file_size(uint64_t cow_file_size) = 0;
     virtual uint64_t cow_file_size() = 0;
 
@@ -51,7 +51,7 @@
 
     // ISnapshotMergeStats overrides
     bool Start() override;
-    void set_state(android::snapshot::UpdateState state) override;
+    void set_state(android::snapshot::UpdateState state, bool using_compression) override;
     void set_cow_file_size(uint64_t cow_file_size) override;
     uint64_t cow_file_size() override;
     std::unique_ptr<Result> Finish() override;
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/snapshot_stub.h b/fs_mgr/libsnapshot/include/libsnapshot/snapshot_stub.h
index cba3560..3365ceb 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/snapshot_stub.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/snapshot_stub.h
@@ -32,6 +32,7 @@
     UpdateState ProcessUpdateState(const std::function<bool()>& callback = {},
                                    const std::function<bool()>& before_cancel = {}) override;
     UpdateState GetUpdateState(double* progress = nullptr) override;
+    bool UpdateUsesCompression() override;
     Return CreateUpdateSnapshots(
             const chromeos_update_engine::DeltaArchiveManifest& manifest) override;
     bool MapUpdateSnapshot(const android::fs_mgr::CreateLogicalPartitionParams& params,
diff --git a/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_kernel.h b/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_kernel.h
index 7941e68..2b6c8ef 100644
--- a/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_kernel.h
+++ b/fs_mgr/libsnapshot/include/libsnapshot/snapuserd_kernel.h
@@ -47,8 +47,8 @@
 static constexpr uint32_t CHUNK_SIZE = 8;
 static constexpr uint32_t CHUNK_SHIFT = (__builtin_ffs(CHUNK_SIZE) - 1);
 
-static constexpr uint32_t BLOCK_SIZE = 4096;
-static constexpr uint32_t BLOCK_SHIFT = (__builtin_ffs(BLOCK_SIZE) - 1);
+static constexpr uint32_t BLOCK_SZ = 4096;
+static constexpr uint32_t BLOCK_SHIFT = (__builtin_ffs(BLOCK_SZ) - 1);
 
 #define DIV_ROUND_UP(n, d) (((n) + (d)-1) / (d))
 
diff --git a/fs_mgr/libsnapshot/snapshot.cpp b/fs_mgr/libsnapshot/snapshot.cpp
index 9329725..eb3a501 100644
--- a/fs_mgr/libsnapshot/snapshot.cpp
+++ b/fs_mgr/libsnapshot/snapshot.cpp
@@ -94,7 +94,11 @@
     if (!info) {
         info = new DeviceInfo();
     }
-    return std::unique_ptr<SnapshotManager>(new SnapshotManager(info));
+    auto sm = std::unique_ptr<SnapshotManager>(new SnapshotManager(info));
+    if (info->IsRecovery()) {
+        sm->ForceLocalImageManager();
+    }
+    return sm;
 }
 
 std::unique_ptr<SnapshotManager> SnapshotManager::NewForFirstStageMount(IDeviceInfo* info) {
@@ -1230,6 +1234,25 @@
     return true;
 }
 
+static bool DeleteDmDevice(const std::string& name, const std::chrono::milliseconds& timeout_ms) {
+    auto start = std::chrono::steady_clock::now();
+    auto& dm = DeviceMapper::Instance();
+    while (true) {
+        if (dm.DeleteDeviceIfExists(name)) {
+            break;
+        }
+        auto now = std::chrono::steady_clock::now();
+        auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(now - start);
+        if (elapsed >= timeout_ms) {
+            LOG(ERROR) << "DeleteDevice timeout: " << name;
+            return false;
+        }
+        std::this_thread::sleep_for(250ms);
+    }
+
+    return true;
+}
+
 bool SnapshotManager::CollapseSnapshotDevice(const std::string& name,
                                              const SnapshotStatus& status) {
     auto& dm = DeviceMapper::Instance();
@@ -1288,10 +1311,11 @@
     if (!dm.DeleteDeviceIfExists(base_name)) {
         LOG(ERROR) << "Unable to delete base device for snapshot: " << base_name;
     }
-    auto source_name = GetSourceDeviceName(name);
-    if (!dm.DeleteDeviceIfExists(source_name)) {
-        LOG(ERROR) << "Unable to delete source device for snapshot: " << source_name;
+
+    if (!DeleteDmDevice(GetSourceDeviceName(name), 4000ms)) {
+        LOG(ERROR) << "Unable to delete source device for snapshot: " << GetSourceDeviceName(name);
     }
+
     return true;
 }
 
@@ -1383,9 +1407,6 @@
         }
 
         auto misc_name = user_cow_name;
-        if (transition == InitTransition::SELINUX_DETACH) {
-            misc_name += "-selinux";
-        }
 
         DmTable table;
         table.Emplace<DmTargetUser>(0, target.spec.length, misc_name);
@@ -1418,6 +1439,7 @@
         // Wait for ueventd to acknowledge and create the control device node.
         std::string control_device = "/dev/dm-user/" + misc_name;
         if (!WaitForDevice(control_device, 10s)) {
+            LOG(ERROR) << "dm-user control device no found:  " << misc_name;
             continue;
         }
 
@@ -1683,6 +1705,17 @@
     return state;
 }
 
+bool SnapshotManager::UpdateUsesCompression() {
+    auto lock = LockShared();
+    if (!lock) return false;
+    return UpdateUsesCompression(lock.get());
+}
+
+bool SnapshotManager::UpdateUsesCompression(LockedFile* lock) {
+    SnapshotUpdateStatus update_status = ReadSnapshotUpdateStatus(lock);
+    return update_status.compression_enabled();
+}
+
 bool SnapshotManager::ListSnapshots(LockedFile* lock, std::vector<std::string>* snapshots) {
     CHECK(lock);
 
@@ -2107,15 +2140,12 @@
     CHECK(lock);
     if (!EnsureImageManager()) return false;
 
-    auto& dm = DeviceMapper::Instance();
-
-    if (IsCompressionEnabled() && !UnmapDmUserDevice(name)) {
+    if (UpdateUsesCompression(lock) && !UnmapDmUserDevice(name)) {
         return false;
     }
 
-    auto cow_name = GetCowName(name);
-    if (!dm.DeleteDeviceIfExists(cow_name)) {
-        LOG(ERROR) << "Cannot unmap " << cow_name;
+    if (!DeleteDmDevice(GetCowName(name), 4000ms)) {
+        LOG(ERROR) << "Cannot unmap: " << GetCowName(name);
         return false;
     }
 
@@ -2140,12 +2170,11 @@
         return false;
     }
 
-    if (!EnsureSnapuserdConnected()) {
-        return false;
-    }
-    if (!snapuserd_client_->WaitForDeviceDelete(dm_user_name)) {
-        LOG(ERROR) << "Failed to wait for " << dm_user_name << " control device to delete";
-        return false;
+    if (EnsureSnapuserdConnected()) {
+        if (!snapuserd_client_->WaitForDeviceDelete(dm_user_name)) {
+            LOG(ERROR) << "Failed to wait for " << dm_user_name << " control device to delete";
+            return false;
+        }
     }
 
     // Ensure the control device is gone so we don't run into ABA problems.
@@ -3087,7 +3116,8 @@
     std::stringstream ss;
 
     ss << "Update state: " << ReadUpdateState(file.get()) << std::endl;
-
+    ss << "Compression: " << ReadSnapshotUpdateStatus(file.get()).compression_enabled()
+       << std::endl;
     ss << "Current slot: " << device_->GetSlotSuffix() << std::endl;
     ss << "Boot indicator: booting from " << GetCurrentSlot() << " slot" << std::endl;
     ss << "Rollback indicator: "
@@ -3169,7 +3199,7 @@
 
     auto slot_number = SlotNumberForSlotSuffix(device_->GetSlotSuffix());
     auto super_path = device_->GetSuperDevice(slot_number);
-    if (!CreateLogicalAndSnapshotPartitions(super_path)) {
+    if (!CreateLogicalAndSnapshotPartitions(super_path, 20s)) {
         LOG(ERROR) << "Unable to map partitions to complete merge.";
         return false;
     }
@@ -3209,7 +3239,7 @@
 
     auto slot_number = SlotNumberForSlotSuffix(device_->GetSlotSuffix());
     auto super_path = device_->GetSuperDevice(slot_number);
-    if (!CreateLogicalAndSnapshotPartitions(super_path)) {
+    if (!CreateLogicalAndSnapshotPartitions(super_path, 20s)) {
         LOG(ERROR) << "Unable to map partitions to complete merge.";
         return false;
     }
@@ -3355,7 +3385,7 @@
     auto slot_suffix = device_->GetOtherSlotSuffix();
     auto slot_number = SlotNumberForSlotSuffix(slot_suffix);
     auto super_path = device_->GetSuperDevice(slot_number);
-    if (!CreateLogicalAndSnapshotPartitions(super_path)) {
+    if (!CreateLogicalAndSnapshotPartitions(super_path, 20s)) {
         LOG(ERROR) << "Unable to map partitions.";
         return CreateResult::ERROR;
     }
diff --git a/fs_mgr/libsnapshot/snapshot_stats.cpp b/fs_mgr/libsnapshot/snapshot_stats.cpp
index 3723730..513700d 100644
--- a/fs_mgr/libsnapshot/snapshot_stats.cpp
+++ b/fs_mgr/libsnapshot/snapshot_stats.cpp
@@ -84,8 +84,9 @@
     return WriteState();
 }
 
-void SnapshotMergeStats::set_state(android::snapshot::UpdateState state) {
+void SnapshotMergeStats::set_state(android::snapshot::UpdateState state, bool using_compression) {
     report_.set_state(state);
+    report_.set_compression_enabled(using_compression);
 }
 
 void SnapshotMergeStats::set_cow_file_size(uint64_t cow_file_size) {
diff --git a/fs_mgr/libsnapshot/snapshot_stub.cpp b/fs_mgr/libsnapshot/snapshot_stub.cpp
index 26b9129..8a254c9 100644
--- a/fs_mgr/libsnapshot/snapshot_stub.cpp
+++ b/fs_mgr/libsnapshot/snapshot_stub.cpp
@@ -116,9 +116,14 @@
     return nullptr;
 }
 
+bool SnapshotManagerStub::UpdateUsesCompression() {
+    LOG(ERROR) << __FUNCTION__ << " should never be called.";
+    return false;
+}
+
 class SnapshotMergeStatsStub : public ISnapshotMergeStats {
     bool Start() override { return false; }
-    void set_state(android::snapshot::UpdateState) override {}
+    void set_state(android::snapshot::UpdateState, bool) override {}
     void set_cow_file_size(uint64_t) override {}
     uint64_t cow_file_size() override { return 0; }
     std::unique_ptr<Result> Finish() override { return nullptr; }
diff --git a/fs_mgr/libsnapshot/snapshot_test.cpp b/fs_mgr/libsnapshot/snapshot_test.cpp
index 95e7d89..d57aa6c 100644
--- a/fs_mgr/libsnapshot/snapshot_test.cpp
+++ b/fs_mgr/libsnapshot/snapshot_test.cpp
@@ -1160,6 +1160,11 @@
 // Test that shrinking and growing partitions at the same time is handled
 // correctly in VABC.
 TEST_F(SnapshotUpdateTest, SpaceSwapUpdate) {
+    if (!IsCompressionEnabled()) {
+        // b/179111359
+        GTEST_SKIP() << "Skipping Virtual A/B Compression test";
+    }
+
     // OTA client blindly unmaps all partitions that are possibly mapped.
     for (const auto& name : {"sys_b", "vnd_b", "prd_b"}) {
         ASSERT_TRUE(sm->UnmapUpdateSnapshot(name));
diff --git a/fs_mgr/libsnapshot/snapuserd.cpp b/fs_mgr/libsnapshot/snapuserd.cpp
index ceba8ab..0a02bd0 100644
--- a/fs_mgr/libsnapshot/snapuserd.cpp
+++ b/fs_mgr/libsnapshot/snapuserd.cpp
@@ -17,6 +17,8 @@
 #include "snapuserd.h"
 
 #include <csignal>
+#include <optional>
+#include <set>
 
 #include <libsnapshot/snapuserd_client.h>
 
@@ -32,7 +34,7 @@
 
 static constexpr size_t PAYLOAD_SIZE = (1UL << 20);
 
-static_assert(PAYLOAD_SIZE >= BLOCK_SIZE);
+static_assert(PAYLOAD_SIZE >= BLOCK_SZ);
 
 void BufferSink::Initialize(size_t size) {
     buffer_size_ = size;
@@ -78,10 +80,10 @@
 // request will always be 4k. After constructing
 // the header, zero out the remaining block.
 void Snapuserd::ConstructKernelCowHeader() {
-    void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SIZE);
+    void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SZ);
     CHECK(buffer != nullptr);
 
-    memset(buffer, 0, BLOCK_SIZE);
+    memset(buffer, 0, BLOCK_SZ);
 
     struct disk_header* dh = reinterpret_cast<struct disk_header*>(buffer);
 
@@ -106,13 +108,13 @@
 // Start the copy operation. This will read the backing
 // block device which is represented by cow_op->source.
 bool Snapuserd::ProcessCopyOp(const CowOperation* cow_op) {
-    void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SIZE);
+    void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SZ);
     CHECK(buffer != nullptr);
 
     // Issue a single 4K IO. However, this can be optimized
     // if the successive blocks are contiguous.
-    if (!android::base::ReadFullyAtOffset(backing_store_fd_, buffer, BLOCK_SIZE,
-                                          cow_op->source * BLOCK_SIZE)) {
+    if (!android::base::ReadFullyAtOffset(backing_store_fd_, buffer, BLOCK_SZ,
+                                          cow_op->source * BLOCK_SZ)) {
         SNAP_PLOG(ERROR) << "Copy-op failed. Read from backing store: " << backing_store_device_
                          << "at block :" << cow_op->source;
         return false;
@@ -123,10 +125,10 @@
 
 bool Snapuserd::ProcessZeroOp() {
     // Zero out the entire block
-    void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SIZE);
+    void* buffer = bufsink_.GetPayloadBuffer(BLOCK_SZ);
     CHECK(buffer != nullptr);
 
-    memset(buffer, 0, BLOCK_SIZE);
+    memset(buffer, 0, BLOCK_SZ);
     return true;
 }
 
@@ -173,11 +175,11 @@
         struct dm_user_message* msg = (struct dm_user_message*)(&(buffer[0]));
 
         memmove(msg->payload.buf, (char*)msg->payload.buf + skip_sector_size,
-                (BLOCK_SIZE - skip_sector_size));
+                (BLOCK_SZ - skip_sector_size));
     }
 
     bufsink_.ResetBufferOffset();
-    return std::min(size, (BLOCK_SIZE - skip_sector_size));
+    return std::min(size, (BLOCK_SZ - skip_sector_size));
 }
 
 /*
@@ -234,7 +236,7 @@
         return ReadUnalignedSector(sector, size, it);
     }
 
-    int num_ops = DIV_ROUND_UP(size, BLOCK_SIZE);
+    int num_ops = DIV_ROUND_UP(size, BLOCK_SZ);
     while (num_ops) {
         if (!ProcessCowOp(it->second)) {
             return -1;
@@ -242,7 +244,7 @@
         num_ops -= 1;
         it++;
         // Update the buffer offset
-        bufsink_.UpdateBufferOffset(BLOCK_SIZE);
+        bufsink_.UpdateBufferOffset(BLOCK_SZ);
 
         SNAP_LOG(DEBUG) << "ReadData at sector: " << sector << " size: " << size;
     }
@@ -344,7 +346,7 @@
 }
 
 int Snapuserd::GetNumberOfMergedOps(void* merged_buffer, void* unmerged_buffer, loff_t offset,
-                                    int unmerged_exceptions, bool* copy_op) {
+                                    int unmerged_exceptions) {
     int merged_ops_cur_iter = 0;
 
     // Find the operations which are merged in this cycle.
@@ -362,10 +364,8 @@
             offset += sizeof(struct disk_exception);
             const CowOperation* cow_op = chunk_map_[ChunkToSector(cow_de->new_chunk)];
             CHECK(cow_op != nullptr);
+
             CHECK(cow_op->new_block == cow_de->old_chunk);
-            if (cow_op->type == kCowCopyOp) {
-                *copy_op = true;
-            }
             // zero out to indicate that operation is merged.
             cow_de->old_chunk = 0;
             cow_de->new_chunk = 0;
@@ -389,12 +389,6 @@
             return -1;
         }
     }
-
-    if (*copy_op) {
-        SNAP_LOG(ERROR) << "Invalid batch merge of copy ops: merged_ops_cur_iter: "
-                        << merged_ops_cur_iter;
-        CHECK(merged_ops_cur_iter == 1);
-    }
     return merged_ops_cur_iter;
 }
 
@@ -416,42 +410,15 @@
     int unmerged_exceptions = 0;
     loff_t offset = GetMergeStartOffset(buffer, vec_[divresult.quot].get(), &unmerged_exceptions);
 
-    bool copy_op = false;
-    // Check if the merged operation is a copy operation. If so, then we need
-    // to explicitly sync the metadata before initiating the next merge.
-    // For ex: Consider a following sequence of copy operations in the COW file:
-    //
-    // Op-1: Copy 2 -> 3
-    // Op-2: Copy 1 -> 2
-    // Op-3: Copy 5 -> 10
-    //
-    // Op-1 and Op-2 are overlapping copy operations. The merge sequence will
-    // look like:
-    //
-    // Merge op-1: Copy 2 -> 3
-    // Merge op-2: Copy 1 -> 2
-    // Merge op-3: Copy 5 -> 10
-    //
-    // Now, let's say we have a crash _after_ Merge op-2; Block 2 contents would
-    // have been over-written by Block-1 after merge op-2. During next reboot,
-    // kernel will request the metadata for all the un-merged blocks. If we had
-    // not sync the metadata after Merge-op 1 and Merge op-2, snapuser daemon
-    // will think that these merge operations are still pending and hence will
-    // inform the kernel that Op-1 and Op-2 are un-merged blocks. When kernel
-    // resumes back the merging process, it will attempt to redo the Merge op-1
-    // once again. However, block 2 contents are wrong as it has the contents
-    // of block 1 from previous merge cycle. Although, merge will silently succeed,
-    // this will lead to silent data corruption.
-    //
-    int merged_ops_cur_iter = GetNumberOfMergedOps(buffer, vec_[divresult.quot].get(), offset,
-                                                   unmerged_exceptions, &copy_op);
+    int merged_ops_cur_iter =
+            GetNumberOfMergedOps(buffer, vec_[divresult.quot].get(), offset, unmerged_exceptions);
 
     // There should be at least one operation merged in this cycle
     CHECK(merged_ops_cur_iter > 0);
 
     header.num_merge_ops += merged_ops_cur_iter;
     reader_->UpdateMergeProgress(merged_ops_cur_iter);
-    if (!writer_->CommitMerge(merged_ops_cur_iter, copy_op)) {
+    if (!writer_->CommitMerge(merged_ops_cur_iter)) {
         SNAP_LOG(ERROR) << "CommitMerge failed... merged_ops_cur_iter: " << merged_ops_cur_iter;
         return false;
     }
@@ -508,12 +475,13 @@
  *      during merge; specifically when the merge operation has dependency.
  *      These dependencies can only happen during copy operations.
  *
- *      To avoid this problem, we make sure that no two copy-operations
- *      do not have contiguous chunk IDs. Additionally, we make sure
- *      that each copy operation is merged individually.
+ *      To avoid this problem, we make sure overlapping copy operations
+ *      are not batch merged.
  * 6: Use a monotonically increasing chunk number to assign the
  *    new_chunk
- * 7: Each chunk-id represents either a: Metadata page or b: Data page
+ * 7: Each chunk-id represents either
+ *        a: Metadata page or
+ *        b: Data page
  * 8: Chunk-id representing a data page is stored in a map.
  * 9: Chunk-id representing a metadata page is converted into a vector
  *    index. We store this in vector as kernel requests metadata during
@@ -533,10 +501,10 @@
     reader_ = std::make_unique<CowReader>();
     CowHeader header;
     CowOptions options;
-    bool prev_copy_op = false;
     bool metadata_found = false;
+    int replace_ops = 0, zero_ops = 0, copy_ops = 0;
 
-    SNAP_LOG(DEBUG) << "ReadMetadata Start...";
+    SNAP_LOG(DEBUG) << "ReadMetadata: Parsing cow file";
 
     if (!reader_->Parse(cow_fd_)) {
         SNAP_LOG(ERROR) << "Failed to parse";
@@ -548,10 +516,10 @@
         return false;
     }
 
-    CHECK(header.block_size == BLOCK_SIZE);
+    CHECK(header.block_size == BLOCK_SZ);
 
-    SNAP_LOG(DEBUG) << "Merge-ops: " << header.num_merge_ops;
     reader_->InitializeMerge();
+    SNAP_LOG(DEBUG) << "Merge-ops: " << header.num_merge_ops;
 
     writer_ = std::make_unique<CowWriter>(options);
     writer_->InitializeMerge(cow_fd_.get(), &header);
@@ -586,17 +554,26 @@
         }
 
         metadata_found = true;
-        if ((cow_op->type == kCowCopyOp || prev_copy_op)) {
+        // This loop will handle all the replace and zero ops.
+        // We will handle the copy ops later as they require special
+        // handling when assigning chunk-ids. Furthermore, we make
+        // sure that replace/zero and copy ops are not batch merged; hence
+        // the bump in the chunk_id before we break out of this loop.
+        if (cow_op->type == kCowCopyOp) {
             data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
+            break;
         }
 
-        prev_copy_op = (cow_op->type == kCowCopyOp);
+        if (cow_op->type == kCowReplaceOp) {
+            replace_ops++;
+        } else if (cow_op->type == kCowZeroOp) {
+            zero_ops++;
+        }
 
         // Construct the disk-exception
         de->old_chunk = cow_op->new_block;
         de->new_chunk = data_chunk_id;
 
-        SNAP_LOG(DEBUG) << "Old-chunk: " << de->old_chunk << "New-chunk: " << de->new_chunk;
 
         // Store operation pointer.
         chunk_map_[ChunkToSector(data_chunk_id)] = cow_op;
@@ -604,6 +581,9 @@
         offset += sizeof(struct disk_exception);
         cowop_riter_->Next();
 
+        SNAP_LOG(DEBUG) << num_ops << ":"
+                        << " Old-chunk: " << de->old_chunk << " New-chunk: " << de->new_chunk;
+
         if (num_ops == exceptions_per_area_) {
             // Store it in vector at the right index. This maps the chunk-id to
             // vector index.
@@ -618,13 +598,213 @@
 
             if (cowop_riter_->Done()) {
                 vec_.push_back(std::move(de_ptr));
-                SNAP_LOG(DEBUG) << "ReadMetadata() completed; Number of Areas: " << vec_.size();
             }
         }
 
         data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
     }
 
+    std::optional<chunk_t> prev_id = {};
+    std::map<uint64_t, const CowOperation*> map;
+    std::set<uint64_t> dest_blocks;
+    size_t pending_copy_ops = exceptions_per_area_ - num_ops;
+    SNAP_LOG(INFO) << " Processing copy-ops at Area: " << vec_.size()
+                   << " Number of replace/zero ops completed in this area: " << num_ops
+                   << " Pending copy ops for this area: " << pending_copy_ops;
+    while (!cowop_riter_->Done()) {
+        do {
+            const CowOperation* cow_op = &cowop_riter_->Get();
+            if (IsMetadataOp(*cow_op)) {
+                cowop_riter_->Next();
+                continue;
+            }
+
+            // We have two specific cases:
+            //
+            // =====================================================
+            // Case 1: Overlapping copy regions
+            //
+            // Ex:
+            //
+            // Source -> Destination
+            //
+            // 1: 15 -> 18
+            // 2: 16 -> 19
+            // 3: 17 -> 20
+            // 4: 18 -> 21
+            // 5: 19 -> 22
+            // 6: 20 -> 23
+            //
+            // We have 6 copy operations to be executed in the OTA and there is an overlap.
+            // Update-engine will write to the COW file as follows:
+            //
+            // Op-1: 20 -> 23
+            // Op-2: 19 -> 22
+            // Op-3: 18 -> 21
+            // Op-4: 17 -> 20
+            // Op-5: 16 -> 19
+            // Op-6: 15 -> 18
+            //
+            // Note that the block numbers are contiguous. Hence, all 6 copy
+            // operations can potentially be batch merged. However, that will be
+            // problematic if we have a crash as block 20, 19, 18 would have
+            // been overwritten and hence subsequent recovery may end up with
+            // a silent data corruption when op-1, op-2 and op-3 are
+            // re-executed.
+            //
+            // We will split these 6 operations into two batches viz:
+            //
+            // Batch-1:
+            // ===================
+            // Op-1: 20 -> 23
+            // Op-2: 19 -> 22
+            // Op-3: 18 -> 21
+            // ===================
+            //
+            // Batch-2:
+            // ==================
+            // Op-4: 17 -> 20
+            // Op-5: 16 -> 19
+            // Op-6: 15 -> 18
+            // ==================
+            //
+            // Now, merge sequence will look like:
+            //
+            // 1: Merge Batch-1 { op-1, op-2, op-3 }
+            // 2: Update Metadata in COW File that op-1, op-2, op-3 merge is
+            // done.
+            // 3: Merge Batch-2
+            // 4: Update Metadata in COW File that op-4, op-5, op-6 merge is
+            // done.
+            //
+            // Note that the order of block operations is still the same.
+            // However, we now have two batch merge operations. Any crash
+            // between these two batches is safe, as each batch is
+            // self-contained.
+            //
+            //===========================================================
+            //
+            // Case 2:
+            //
+            // Let's say we have three copy operations written to COW file
+            // in the following order:
+            //
+            // op-1: 15 -> 18
+            // op-2: 16 -> 19
+            // op-3: 17 -> 20
+            //
+            // As aforementioned, kernel will initiate merge in reverse order.
+            // Hence, we will read these ops in reverse order so that all these
+            // ops are executed in the same order as requested. Thus, we will
+            // read the metadata in reverse order and for the kernel it will
+            // look like:
+            //
+            // op-3: 17 -> 20
+            // op-2: 16 -> 19
+            // op-1: 15 -> 18   <-- Merge starts here in the kernel
+            //
+            // Now, this is problematic as kernel cannot batch merge them.
+            //
+            // Merge sequence will look like:
+            //
+            // Merge-1: op-1: 15 -> 18
+            // Merge-2: op-2: 16 -> 19
+            // Merge-3: op-3: 17 -> 20
+            //
+            // We have three merge operations.
+            //
+            // Even though the blocks are contiguous, kernel can batch merge
+            // them if the blocks are in descending order. Update engine
+            // addresses this issue partially for overlapping operations as
+            // we see that op-1 to op-3 and op-4 to op-6 operations are in
+            // descending order. However, if the copy operations are not
+            // overlapping, update engine cannot write these blocks
+            // in descending order. Hence, we will try to address it.
+            // Thus, we will send these blocks to the kernel and it will
+            // look like:
+            //
+            // op-3: 15 -> 18
+            // op-2: 16 -> 19
+            // op-1: 17 -> 20  <-- Merge starts here in the kernel
+            //
+            // Now with this change, we can batch merge all these three
+            // operations. Merge sequence will look like:
+            //
+            // Merge-1: {op-1: 17 -> 20, op-2: 16 -> 19, op-3: 15 -> 18}
+            //
+            // Note that we have changed the ordering of the merge; however, this
+            // is ok as each of these copy operations is independent and there
+            // is no overlap.
+            //
+            //===================================================================
+            if (prev_id.has_value()) {
+                chunk_t diff = (cow_op->new_block > prev_id.value())
+                                       ? (cow_op->new_block - prev_id.value())
+                                       : (prev_id.value() - cow_op->new_block);
+                if (diff != 1) {
+                    break;
+                }
+                if (dest_blocks.count(cow_op->new_block) || map.count(cow_op->source) > 0) {
+                    break;
+                }
+            }
+            metadata_found = true;
+            pending_copy_ops -= 1;
+            map[cow_op->new_block] = cow_op;
+            dest_blocks.insert(cow_op->source);
+            prev_id = cow_op->new_block;
+            cowop_riter_->Next();
+        } while (!cowop_riter_->Done() && pending_copy_ops);
+
+        data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
+        SNAP_LOG(DEBUG) << "Batch Merge copy-ops of size: " << map.size()
+                        << " Area: " << vec_.size() << " Area offset: " << offset
+                        << " Pending-copy-ops in this area: " << pending_copy_ops;
+
+        for (auto it = map.begin(); it != map.end(); it++) {
+            struct disk_exception* de =
+                    reinterpret_cast<struct disk_exception*>((char*)de_ptr.get() + offset);
+            de->old_chunk = it->first;
+            de->new_chunk = data_chunk_id;
+
+            // Store operation pointer.
+            chunk_map_[ChunkToSector(data_chunk_id)] = it->second;
+            offset += sizeof(struct disk_exception);
+            num_ops += 1;
+            copy_ops++;
+
+            SNAP_LOG(DEBUG) << num_ops << ":"
+                            << " Copy-op: "
+                            << " Old-chunk: " << de->old_chunk << " New-chunk: " << de->new_chunk;
+
+            if (num_ops == exceptions_per_area_) {
+                // Store it in vector at the right index. This maps the chunk-id to
+                // vector index.
+                vec_.push_back(std::move(de_ptr));
+                num_ops = 0;
+                offset = 0;
+
+                // Create buffer for next area
+                de_ptr = std::make_unique<uint8_t[]>(exceptions_per_area_ *
+                                                     sizeof(struct disk_exception));
+                memset(de_ptr.get(), 0, (exceptions_per_area_ * sizeof(struct disk_exception)));
+
+                if (cowop_riter_->Done()) {
+                    vec_.push_back(std::move(de_ptr));
+                    SNAP_LOG(DEBUG) << "ReadMetadata() completed; Number of Areas: " << vec_.size();
+                }
+
+                CHECK(pending_copy_ops == 0);
+                pending_copy_ops = exceptions_per_area_;
+            }
+
+            data_chunk_id = GetNextAllocatableChunkId(data_chunk_id);
+        }
+        map.clear();
+        dest_blocks.clear();
+        prev_id.reset();
+    }
+
     // Partially filled area or there is no metadata
     // If there is no metadata, fill with zero so that kernel
     // is aware that merge is completed.
@@ -634,8 +814,11 @@
                         << "Areas : " << vec_.size();
     }
 
-    SNAP_LOG(DEBUG) << "ReadMetadata() completed. Final_chunk_id: " << data_chunk_id
-                    << "Num Sector: " << ChunkToSector(data_chunk_id);
+    SNAP_LOG(INFO) << "ReadMetadata completed. Final-chunk-id: " << data_chunk_id
+                   << " Num Sector: " << ChunkToSector(data_chunk_id)
+                   << " Replace-ops: " << replace_ops << " Zero-ops: " << zero_ops
+                   << " Copy-ops: " << copy_ops << " Areas: " << vec_.size()
+                   << " Num-ops-merged: " << header.num_merge_ops;
 
     // Total number of sectors required for creating dm-user device
     num_sectors_ = ChunkToSector(data_chunk_id);
@@ -744,7 +927,7 @@
 
     size_t remaining_size = header->len;
     size_t read_size = std::min(PAYLOAD_SIZE, remaining_size);
-    CHECK(read_size == BLOCK_SIZE);
+    CHECK(read_size == BLOCK_SZ);
 
     CHECK(header->sector > 0);
     chunk_t chunk = SectorToChunk(header->sector);
@@ -795,11 +978,11 @@
         // will always be a single 4k.
         if (header->sector == 0) {
             CHECK(metadata_read_done_ == true);
-            CHECK(read_size == BLOCK_SIZE);
+            CHECK(read_size == BLOCK_SZ);
             ConstructKernelCowHeader();
             SNAP_LOG(DEBUG) << "Kernel header constructed";
         } else {
-            if (!offset && (read_size == BLOCK_SIZE) &&
+            if (!offset && (read_size == BLOCK_SZ) &&
                 chunk_map_.find(header->sector) == chunk_map_.end()) {
                 if (!ReadDiskExceptions(chunk, read_size)) {
                     SNAP_LOG(ERROR) << "ReadDiskExceptions failed for chunk id: " << chunk
diff --git a/fs_mgr/libsnapshot/snapuserd.h b/fs_mgr/libsnapshot/snapuserd.h
index c01fee3..dba3186 100644
--- a/fs_mgr/libsnapshot/snapuserd.h
+++ b/fs_mgr/libsnapshot/snapuserd.h
@@ -70,6 +70,13 @@
     const std::string& GetMiscName() { return misc_name_; }
     uint64_t GetNumSectors() { return num_sectors_; }
     bool IsAttached() const { return ctrl_fd_ >= 0; }
+    void CloseFds() {
+        ctrl_fd_ = {};
+        cow_fd_ = {};
+        backing_store_fd_ = {};
+    }
+    size_t GetMetadataAreaSize() { return vec_.size(); }
+    void* GetExceptionBuffer(size_t i) { return vec_[i].get(); }
 
   private:
     bool DmuserReadRequest();
@@ -96,11 +103,11 @@
     loff_t GetMergeStartOffset(void* merged_buffer, void* unmerged_buffer,
                                int* unmerged_exceptions);
     int GetNumberOfMergedOps(void* merged_buffer, void* unmerged_buffer, loff_t offset,
-                             int unmerged_exceptions, bool* copy_op);
+                             int unmerged_exceptions);
     bool ProcessMergeComplete(chunk_t chunk, void* buffer);
     sector_t ChunkToSector(chunk_t chunk) { return chunk << CHUNK_SHIFT; }
     chunk_t SectorToChunk(sector_t sector) { return sector >> CHUNK_SHIFT; }
-    bool IsBlockAligned(int read_size) { return ((read_size & (BLOCK_SIZE - 1)) == 0); }
+    bool IsBlockAligned(int read_size) { return ((read_size & (BLOCK_SZ - 1)) == 0); }
 
     std::string cow_device_;
     std::string backing_store_device_;
diff --git a/fs_mgr/libsnapshot/snapuserd.rc b/fs_mgr/libsnapshot/snapuserd.rc
index f2d21ac..4bf34a2 100644
--- a/fs_mgr/libsnapshot/snapuserd.rc
+++ b/fs_mgr/libsnapshot/snapuserd.rc
@@ -4,3 +4,4 @@
     disabled
     user root
     group root system
+    seclabel u:r:snapuserd:s0
diff --git a/fs_mgr/libsnapshot/snapuserd_server.cpp b/fs_mgr/libsnapshot/snapuserd_server.cpp
index 38abaec..68a00a0 100644
--- a/fs_mgr/libsnapshot/snapuserd_server.cpp
+++ b/fs_mgr/libsnapshot/snapuserd_server.cpp
@@ -210,6 +210,8 @@
         }
     }
 
+    handler->snapuserd()->CloseFds();
+
     auto misc_name = handler->misc_name();
     LOG(INFO) << "Handler thread about to exit: " << misc_name;
 
diff --git a/fs_mgr/tests/adb-remount-test.sh b/fs_mgr/tests/adb-remount-test.sh
index 2433833..242fa93 100755
--- a/fs_mgr/tests/adb-remount-test.sh
+++ b/fs_mgr/tests/adb-remount-test.sh
@@ -213,6 +213,13 @@
     return ${ret}
 }
 
+[ "USAGE: adb_test <expression>
+
+Returns: exit status of the test expression" ]
+adb_test() {
+  adb_sh test "${@}" </dev/null
+}
+
 [ "USAGE: adb_reboot
 
 Returns: true if the reboot command succeeded" ]
@@ -844,6 +851,9 @@
   NORMAL=""
 fi
 
+# Set an ERR trap handler to report any unhandled error
+trap 'die "line ${LINENO}: unhandled error"' ERR
+
 if ${print_time}; then
   echo "${BLUE}[     INFO ]${NORMAL}" start `date` >&2
 fi
@@ -871,10 +881,10 @@
 [ -z "${D}" -o -n "${ANDROID_SERIAL}" ] || ANDROID_SERIAL=${D}
 USB_SERIAL=
 if [ -n "${ANDROID_SERIAL}" -a "Darwin" != "${HOSTOS}" ]; then
-  USB_SERIAL="`find /sys/devices -name serial | grep usb`"
+  USB_SERIAL="`find /sys/devices -name serial | grep usb || true`"
   if [ -n "${USB_SERIAL}" ]; then
     USB_SERIAL=`echo "${USB_SERIAL}" |
-                  xargs grep -l ${ANDROID_SERIAL}`
+                  xargs grep -l ${ANDROID_SERIAL} || true`
   fi
 fi
 USB_ADDRESS=
@@ -956,7 +966,7 @@
     if inAdb; then
       reboot=false
       for d in ${OVERLAYFS_BACKING}; do
-        if adb_su ls -d /${d}/overlay </dev/null >/dev/null 2>/dev/null; then
+        if adb_test -d /${d}/overlay; then
           adb_su rm -rf /${d}/overlay </dev/null
           reboot=true
         fi
@@ -1010,7 +1020,10 @@
 echo "${GREEN}[ RUN      ]${NORMAL} Testing kernel support for overlayfs" >&2
 
 adb_wait || die "wait for device failed"
-adb_sh ls -d /sys/module/overlay </dev/null >/dev/null 2>/dev/null ||
+adb_root ||
+  die "initial setup"
+
+adb_test -d /sys/module/overlay ||
   adb_sh grep "nodev${TAB}overlay" /proc/filesystems </dev/null >/dev/null 2>/dev/null &&
   echo "${GREEN}[       OK ]${NORMAL} overlay module present" >&2 ||
   (
@@ -1019,7 +1032,7 @@
   ) ||
   overlayfs_supported=false
 if ${overlayfs_supported}; then
-  adb_su ls /sys/module/overlay/parameters/override_creds </dev/null >/dev/null 2>/dev/null &&
+  adb_test -f /sys/module/overlay/parameters/override_creds &&
     echo "${GREEN}[       OK ]${NORMAL} overlay module supports override_creds" >&2 ||
     case `adb_sh uname -r </dev/null` in
       4.[456789].* | 4.[1-9][0-9]* | [56789].*)
@@ -1032,9 +1045,6 @@
     esac
 fi
 
-adb_root ||
-  die "initial setup"
-
 echo "${GREEN}[ RUN      ]${NORMAL} Checking current overlayfs status" >&2
 
 # We can not universally use adb enable-verity to ensure device is
@@ -1044,7 +1054,7 @@
 # having to go through enable-verity transition.
 reboot=false
 for d in ${OVERLAYFS_BACKING}; do
-  if adb_sh ls -d /${d}/overlay </dev/null >/dev/null 2>/dev/null; then
+  if adb_test -d /${d}/overlay; then
     echo "${YELLOW}[  WARNING ]${NORMAL} /${d}/overlay is setup, surgically wiping" >&2
     adb_sh rm -rf /${d}/overlay </dev/null ||
       die "/${d}/overlay wipe"
@@ -1171,7 +1181,7 @@
 
 # Feed log with selinux denials as baseline before overlays
 adb_unroot
-adb_sh find ${MOUNTS} </dev/null >/dev/null 2>/dev/null
+adb_sh find ${MOUNTS} </dev/null >/dev/null 2>/dev/null || true
 adb_root
 
 D=`adb remount 2>&1`
@@ -1220,7 +1230,7 @@
     die "scratch size"
   echo "${BLUE}[     INFO ]${NORMAL} scratch size ${scratch_size}KB" >&2
   for d in ${OVERLAYFS_BACKING}; do
-    if adb_sh ls -d /${d}/overlay/system/upper </dev/null >/dev/null 2>/dev/null; then
+    if adb_test -d /${d}/overlay/system/upper; then
       echo "${BLUE}[     INFO ]${NORMAL} /${d}/overlay is setup" >&2
     fi
   done
@@ -1305,7 +1315,7 @@
 [ -n "${VENDOR_DEVT%[0-9a-fA-F][0-9a-fA-F]}" ] ||
   echo "${YELLOW}[  WARNING ]${NORMAL} vendor devt ${VENDOR_DEVT} major 0" >&2
 
-# Download libc.so, append some gargage, push back, and check if the file
+# Download libc.so, append some garbage, push back, and check if the file
 # is updated.
 tempdir="`mktemp -d`"
 cleanup() {
@@ -1313,8 +1323,8 @@
 }
 adb pull /system/lib/bootstrap/libc.so ${tempdir} >/dev/null ||
   die "pull libc.so from device"
-garbage="`hexdump -n 16 -e '4/4 "%08X" 1 "\n"' /dev/random`"
-echo ${garbage} >> ${tempdir}/libc.so
+garbage="D105225BBFCB1EB8AB8EBDB7094646F0"
+echo "${garbage}" >> ${tempdir}/libc.so
 adb push ${tempdir}/libc.so /system/lib/bootstrap/libc.so >/dev/null ||
   die "push libc.so to device"
 adb pull /system/lib/bootstrap/libc.so ${tempdir}/libc.so.fromdevice >/dev/null ||
@@ -1358,7 +1368,7 @@
   echo "${GREEN}[       OK ]${NORMAL} /vendor content correct MAC after reboot" >&2
   # Feed unprivileged log with selinux denials as a result of overlays
   wait_for_screen
-  adb_sh find ${MOUNTS} </dev/null >/dev/null 2>/dev/null
+  adb_sh find ${MOUNTS} </dev/null >/dev/null 2>/dev/null || true
 fi
 # If overlayfs has a nested security problem, this will fail.
 B="`adb_ls /system/`" ||
@@ -1385,7 +1395,7 @@
 check_eq "${BASE_SYSTEM_DEVT}" "`adb_sh stat --format=%D /system/xbin/su </dev/null`" --warning devt for su after reboot
 
 # Feed log with selinux denials as a result of overlays
-adb_sh find ${MOUNTS} </dev/null >/dev/null 2>/dev/null
+adb_sh find ${MOUNTS} </dev/null >/dev/null 2>/dev/null || true
 
 # Check if the updated libc.so is persistent after reboot.
 adb_root &&
@@ -1656,8 +1666,10 @@
 # This also saves a lot of 'noise' from the command doing a mkfs on backing
 # storage and all the related tuning and adjustment.
 for d in ${OVERLAYFS_BACKING}; do
-  adb_su rm -rf /${d}/overlay </dev/null ||
-    die "/${d}/overlay wipe"
+  if adb_test -d /${d}/overlay; then
+    adb_su rm -rf /${d}/overlay </dev/null ||
+      die "/${d}/overlay wipe"
+  fi
 done
 adb_reboot &&
   adb_wait ${ADB_WAIT} ||
diff --git a/gatekeeperd/Android.bp b/gatekeeperd/Android.bp
index 2d9a820..d1046df 100644
--- a/gatekeeperd/Android.bp
+++ b/gatekeeperd/Android.bp
@@ -41,7 +41,7 @@
         "libhidlbase",
         "android.hardware.gatekeeper@1.0",
         "libgatekeeper_aidl",
-        "android.hardware.security.keymint-unstable-ndk_platform",
+        "android.hardware.security.keymint-V1-ndk_platform",
         "android.security.authorization-ndk_platform",
     ],
 
diff --git a/gatekeeperd/gatekeeperd.cpp b/gatekeeperd/gatekeeperd.cpp
index 941f8c2..781b4af 100644
--- a/gatekeeperd/gatekeeperd.cpp
+++ b/gatekeeperd/gatekeeperd.cpp
@@ -327,7 +327,6 @@
                         LOG(ERROR) << "Failure in sending AuthToken to AuthorizationService.";
                         return GK_ERROR;
                     }
-                    AIBinder_decStrong(authzAIBinder);
                 }
                 sp<IServiceManager> sm = defaultServiceManager();
 
diff --git a/healthd/BatteryMonitor.cpp b/healthd/BatteryMonitor.cpp
index fd810cb..377acb7 100644
--- a/healthd/BatteryMonitor.cpp
+++ b/healthd/BatteryMonitor.cpp
@@ -349,9 +349,14 @@
 }
 
 void BatteryMonitor::logValues(void) {
+    logValues(*mHealthInfo, *mHealthdConfig);
+}
+
+void BatteryMonitor::logValues(const android::hardware::health::V2_1::HealthInfo& health_info,
+                               const struct healthd_config& healthd_config) {
     char dmesgline[256];
     size_t len;
-    const HealthInfo_1_0& props = mHealthInfo->legacy.legacy;
+    const HealthInfo_1_0& props = health_info.legacy.legacy;
     if (props.batteryPresent) {
         snprintf(dmesgline, sizeof(dmesgline), "battery l=%d v=%d t=%s%d.%d h=%d st=%d",
                  props.batteryLevel, props.batteryVoltage, props.batteryTemperature < 0 ? "-" : "",
@@ -359,17 +364,17 @@
                  props.batteryHealth, props.batteryStatus);
 
         len = strlen(dmesgline);
-        if (!mHealthdConfig->batteryCurrentNowPath.isEmpty()) {
+        if (!healthd_config.batteryCurrentNowPath.isEmpty()) {
             len += snprintf(dmesgline + len, sizeof(dmesgline) - len, " c=%d",
                             props.batteryCurrent);
         }
 
-        if (!mHealthdConfig->batteryFullChargePath.isEmpty()) {
+        if (!healthd_config.batteryFullChargePath.isEmpty()) {
             len += snprintf(dmesgline + len, sizeof(dmesgline) - len, " fc=%d",
                             props.batteryFullCharge);
         }
 
-        if (!mHealthdConfig->batteryCycleCountPath.isEmpty()) {
+        if (!healthd_config.batteryCycleCountPath.isEmpty()) {
             len += snprintf(dmesgline + len, sizeof(dmesgline) - len, " cc=%d",
                             props.batteryCycleCount);
         }
diff --git a/healthd/include/healthd/BatteryMonitor.h b/healthd/include/healthd/BatteryMonitor.h
index fadb5a5..3cda727 100644
--- a/healthd/include/healthd/BatteryMonitor.h
+++ b/healthd/include/healthd/BatteryMonitor.h
@@ -66,6 +66,9 @@
     void logValues(void);
     bool isChargerOnline();
 
+    static void logValues(const android::hardware::health::V2_1::HealthInfo& health_info,
+                          const struct healthd_config& healthd_config);
+
   private:
     struct healthd_config *mHealthdConfig;
     Vector<String8> mChargerNames;
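The static logValues() overload above lets code that only holds a HealthInfo and a healthd_config log battery values without a BatteryMonitor instance. A hypothetical caller sketch (the helper name and include are assumptions, not part of this change):

    #include <healthd/BatteryMonitor.h>

    // Hypothetical helper: log a snapshot of battery values that were read
    // elsewhere, using the new static overload instead of a member call.
    void LogBatterySnapshot(const android::hardware::health::V2_1::HealthInfo& info,
                            const healthd_config& config) {
        android::BatteryMonitor::logValues(info, config);
    }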
diff --git a/init/Android.bp b/init/Android.bp
index 06ecc59..b0a59b1 100644
--- a/init/Android.bp
+++ b/init/Android.bp
@@ -74,7 +74,6 @@
 
 cc_defaults {
     name: "init_defaults",
-    cpp_std: "experimental",
     sanitize: {
         misc_undefined: ["signed-integer-overflow"],
     },
@@ -336,7 +335,6 @@
 cc_binary {
     name: "host_init_verifier",
     host_supported: true,
-    cpp_std: "experimental",
     cflags: [
         "-Wall",
         "-Wextra",
diff --git a/init/README.ueventd.md b/init/README.ueventd.md
index 2a76620..3ffca88 100644
--- a/init/README.ueventd.md
+++ b/init/README.ueventd.md
@@ -151,8 +151,8 @@
 For boot time purposes, this is done in parallel across a set of child processes. `ueventd.cpp` in
 this directory contains documentation on how the parallelization is done.
 
-There is an option to parallelize the restorecon function during cold boot as well. This should only
-be done for devices that do not use genfscon, which is the recommended method for labeling sysfs
-nodes. To enable this option, use the below line in a ueventd.rc script:
+There is an option to parallelize the restorecon function during cold boot as well. It is
+recommended that devices use genfscon for labeling sysfs nodes. However, some devices may benefit
+from enabling the parallelization option:
 
     parallel_restorecon enabled
diff --git a/init/init.cpp b/init/init.cpp
index ca2d5da..b08037a 100644
--- a/init/init.cpp
+++ b/init/init.cpp
@@ -723,37 +723,6 @@
     }
 }
 
-static Result<void> TransitionSnapuserdAction(const BuiltinArguments&) {
-    if (!SnapshotManager::IsSnapshotManagerNeeded() ||
-        !android::base::GetBoolProperty(android::snapshot::kVirtualAbCompressionProp, false)) {
-        return {};
-    }
-
-    auto sm = SnapshotManager::New();
-    if (!sm) {
-        LOG(FATAL) << "Failed to create SnapshotManager, will not transition snapuserd";
-        return {};
-    }
-
-    ServiceList& service_list = ServiceList::GetInstance();
-    auto svc = service_list.FindService("snapuserd");
-    if (!svc) {
-        LOG(FATAL) << "Failed to find snapuserd service, aborting transition";
-        return {};
-    }
-    svc->Start();
-    svc->SetShutdownCritical();
-
-    if (!sm->PerformSecondStageInitTransition()) {
-        LOG(FATAL) << "Failed to transition snapuserd to second-stage";
-    }
-
-    if (auto pid = GetSnapuserdFirstStagePid()) {
-        KillFirstStageSnapuserd(pid.value());
-    }
-    return {};
-}
-
 int SecondStageMain(int argc, char** argv) {
     if (REBOOT_BOOTLOADER_ON_PANIC) {
         InstallRebootSignalHandlers();
@@ -900,9 +869,7 @@
 
     // Queue an action that waits for coldboot done so we know ueventd has set up all of /dev...
     am.QueueBuiltinAction(wait_for_coldboot_done_action, "wait_for_coldboot_done");
-    am.QueueBuiltinAction(TransitionSnapuserdAction, "TransitionSnapuserd");
     // ... so that we can start queuing up actions that require stuff from /dev.
-    am.QueueBuiltinAction(MixHwrngIntoLinuxRngAction, "MixHwrngIntoLinuxRng");
     am.QueueBuiltinAction(SetMmapRndBitsAction, "SetMmapRndBits");
     Keychords keychords;
     am.QueueBuiltinAction(
@@ -918,10 +885,6 @@
     // Trigger all the boot actions to get us started.
     am.QueueEventTrigger("init");
 
-    // Repeat mix_hwrng_into_linux_rng in case /dev/hw_random or /dev/random
-    // wasn't ready immediately after wait_for_coldboot_done
-    am.QueueBuiltinAction(MixHwrngIntoLinuxRngAction, "MixHwrngIntoLinuxRng");
-
     // Don't mount filesystems or start core system services in charger mode.
     std::string bootmode = GetProperty("ro.bootmode", "");
     if (bootmode == "charger") {
diff --git a/init/security.cpp b/init/security.cpp
index ac784a3..970696e 100644
--- a/init/security.cpp
+++ b/init/security.cpp
@@ -36,59 +36,6 @@
 namespace android {
 namespace init {
 
-// Writes 512 bytes of output from Hardware RNG (/dev/hw_random, backed
-// by Linux kernel's hw_random framework) into Linux RNG's via /dev/urandom.
-// Does nothing if Hardware RNG is not present.
-//
-// Since we don't yet trust the quality of Hardware RNG, these bytes are not
-// mixed into the primary pool of Linux RNG and the entropy estimate is left
-// unmodified.
-//
-// If the HW RNG device /dev/hw_random is present, we require that at least
-// 512 bytes read from it are written into Linux RNG. QA is expected to catch
-// devices/configurations where these I/O operations are blocking for a long
-// time. We do not reboot or halt on failures, as this is a best-effort
-// attempt.
-Result<void> MixHwrngIntoLinuxRngAction(const BuiltinArguments&) {
-    unique_fd hwrandom_fd(
-        TEMP_FAILURE_RETRY(open("/dev/hw_random", O_RDONLY | O_NOFOLLOW | O_CLOEXEC)));
-    if (hwrandom_fd == -1) {
-        if (errno == ENOENT) {
-            LOG(INFO) << "/dev/hw_random not found";
-            // It's not an error to not have a Hardware RNG.
-            return {};
-        }
-        return ErrnoError() << "Failed to open /dev/hw_random";
-    }
-
-    unique_fd urandom_fd(
-        TEMP_FAILURE_RETRY(open("/dev/urandom", O_WRONLY | O_NOFOLLOW | O_CLOEXEC)));
-    if (urandom_fd == -1) {
-        return ErrnoError() << "Failed to open /dev/urandom";
-    }
-
-    char buf[512];
-    size_t total_bytes_written = 0;
-    while (total_bytes_written < sizeof(buf)) {
-        ssize_t chunk_size =
-            TEMP_FAILURE_RETRY(read(hwrandom_fd, buf, sizeof(buf) - total_bytes_written));
-        if (chunk_size == -1) {
-            return ErrnoError() << "Failed to read from /dev/hw_random";
-        } else if (chunk_size == 0) {
-            return Error() << "Failed to read from /dev/hw_random: EOF";
-        }
-
-        chunk_size = TEMP_FAILURE_RETRY(write(urandom_fd, buf, chunk_size));
-        if (chunk_size == -1) {
-            return ErrnoError() << "Failed to write to /dev/urandom";
-        }
-        total_bytes_written += chunk_size;
-    }
-
-    LOG(INFO) << "Mixed " << total_bytes_written << " bytes from /dev/hw_random into /dev/urandom";
-    return {};
-}
-
 static bool SetHighestAvailableOptionValue(const std::string& path, int min, int max) {
     std::ifstream inf(path, std::fstream::in);
     if (!inf) {
diff --git a/init/security.h b/init/security.h
index 43c2739..e8bec6a 100644
--- a/init/security.h
+++ b/init/security.h
@@ -26,7 +26,6 @@
 namespace android {
 namespace init {
 
-Result<void> MixHwrngIntoLinuxRngAction(const BuiltinArguments&);
 Result<void> SetMmapRndBitsAction(const BuiltinArguments&);
 Result<void> SetKptrRestrictAction(const BuiltinArguments&);
 Result<void> TestPerfEventSelinuxAction(const BuiltinArguments&);
diff --git a/init/service.cpp b/init/service.cpp
index cfb8284..f6ce094 100644
--- a/init/service.cpp
+++ b/init/service.cpp
@@ -127,7 +127,8 @@
 
 static bool AreRuntimeApexesReady() {
     struct stat buf;
-    return stat("/apex/com.android.runtime/", &buf) == 0;
+    return stat("/apex/com.android.art/", &buf) == 0 &&
+           stat("/apex/com.android.runtime/", &buf) == 0;
 }
 
 unsigned long Service::next_start_order_ = 1;
diff --git a/libcutils/Android.bp b/libcutils/Android.bp
index d46aeab..c75e538 100644
--- a/libcutils/Android.bp
+++ b/libcutils/Android.bp
@@ -167,7 +167,6 @@
         "canned_fs_config.cpp",
         "iosched_policy.cpp",
         "load_file.cpp",
-        "memory.cpp",
         "native_handle.cpp",
         "properties.cpp",
         "record_stream.cpp",
diff --git a/libcutils/include/cutils/memory.h b/libcutils/include/cutils/memory.h
index 0fba53c..c6476c1 100644
--- a/libcutils/include/cutils/memory.h
+++ b/libcutils/include/cutils/memory.h
@@ -28,9 +28,6 @@
 size_t strlcpy(char *dst, const char *src, size_t size);
 #endif
 
-// Disables memory mitigations for the entire process, and logs appropriately.
-void process_disable_memory_mitigations();
-
 #ifdef __cplusplus
 } // extern "C"
 #endif
diff --git a/libcutils/memory.cpp b/libcutils/memory.cpp
deleted file mode 100644
index 5a410c2..0000000
--- a/libcutils/memory.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <cutils/memory.h>
-
-#include <log/log.h>
-
-#if !defined(__APPLE__)
-#include <malloc.h>
-#endif
-
-void process_disable_memory_mitigations() {
-    bool success = false;
-#ifdef __BIONIC__
-    success = mallopt(M_BIONIC_DISABLE_MEMORY_MITIGATIONS, 0);
-#endif
-
-    // TODO: if b/158870657 is fixed and scudo is used globally,
-    // we can assert on failure rather than just log.
-    if (success) {
-        ALOGI("Disabled memory mitigations for process.");
-    } else {
-        ALOGE("Could not disable memory mitigations for process.");
-    }
-}
diff --git a/libgrallocusage/OWNERS b/libgrallocusage/OWNERS
index 154dc6d..de2bf16 100644
--- a/libgrallocusage/OWNERS
+++ b/libgrallocusage/OWNERS
@@ -1,3 +1,2 @@
-jessehall@google.com
-olv@google.com
-stoza@google.com
+jreck@google.com
+lpy@google.com
diff --git a/libprocessgroup/cgrouprc/Android.bp b/libprocessgroup/cgrouprc/Android.bp
index bb59942..def069b 100644
--- a/libprocessgroup/cgrouprc/Android.bp
+++ b/libprocessgroup/cgrouprc/Android.bp
@@ -46,19 +46,19 @@
         "libcgrouprc_format",
     ],
     stubs: {
-        symbol_file: "libcgrouprc.llndk.txt",
+        symbol_file: "libcgrouprc.map.txt",
         versions: ["29"],
     },
     target: {
         linux: {
-            version_script: "libcgrouprc.llndk.txt",
+            version_script: "libcgrouprc.map.txt",
         },
     },
 }
 
 llndk_library {
     name: "libcgrouprc.llndk",
-    symbol_file: "libcgrouprc.llndk.txt",
+    symbol_file: "libcgrouprc.map.txt",
     native_bridge_supported: true,
     export_include_dirs: [
         "include",
diff --git a/libprocessgroup/cgrouprc/libcgrouprc.llndk.txt b/libprocessgroup/cgrouprc/libcgrouprc.map.txt
similarity index 100%
rename from libprocessgroup/cgrouprc/libcgrouprc.llndk.txt
rename to libprocessgroup/cgrouprc/libcgrouprc.map.txt
diff --git a/libprocessgroup/include/processgroup/processgroup.h b/libprocessgroup/include/processgroup/processgroup.h
index 4aa439a..1cadc9f 100644
--- a/libprocessgroup/include/processgroup/processgroup.h
+++ b/libprocessgroup/include/processgroup/processgroup.h
@@ -35,6 +35,8 @@
 #ifndef __ANDROID_VNDK__
 
 static constexpr const char* CGROUPS_RC_PATH = "/dev/cgroup_info/cgroup.rc";
+// Path to test against for freezer support
+static constexpr const char* CGROUP_FREEZE_PATH = "/sys/fs/cgroup/freezer/cgroup.freeze";
 
 bool UsePerAppMemcg();
 
diff --git a/libprocessgroup/processgroup.cpp b/libprocessgroup/processgroup.cpp
index 2bf48fc..d669ebe 100644
--- a/libprocessgroup/processgroup.cpp
+++ b/libprocessgroup/processgroup.cpp
@@ -131,25 +131,13 @@
     return StringPrintf("%s/uid_%d/pid_%d", cgroup, uid, pid);
 }
 
-static int RemoveProcessGroup(const char* cgroup, uid_t uid, int pid, unsigned int retries) {
-    int ret = 0;
+static int RemoveProcessGroup(const char* cgroup, uid_t uid, int pid) {
+    int ret;
+
     auto uid_pid_path = ConvertUidPidToPath(cgroup, uid, pid);
+    ret = rmdir(uid_pid_path.c_str());
+
     auto uid_path = ConvertUidToPath(cgroup, uid);
-
-    if (retries == 0) {
-        retries = 1;
-    }
-
-    while (retries--) {
-        ret = rmdir(uid_pid_path.c_str());
-        if (!ret || errno != EBUSY) break;
-        std::this_thread::sleep_for(5ms);
-    }
-
-    // With the exception of boot or shutdown, system uid_ folders are always populated. Spinning
-    // here would needlessly delay most pid removals. Additionally, once empty a uid_ cgroup won't
-    // have processes hanging on it (we've already spun for all its pid_), so there's no need to
-    // spin anyway.
     rmdir(uid_path.c_str());
 
     return ret;
@@ -188,7 +176,7 @@
     std::vector<std::string> cgroups;
     std::string path;
 
-    if (CgroupGetControllerPath(CGROUPV2_CONTROLLER_NAME, &path)) {
+    if (CgroupGetControllerPath("cpuacct", &path)) {
         cgroups.push_back(path);
     }
     if (CgroupGetControllerPath("memory", &path)) {
@@ -224,49 +212,19 @@
     }
 }
 
-/**
- * Process groups are primarily created by the Zygote, meaning that uid/pid groups are created by
- * the user root. Ownership for the newly created cgroup and all of its files must thus be
- * transferred for the user/group passed as uid/gid before system_server can properly access them.
- */
 static bool MkdirAndChown(const std::string& path, mode_t mode, uid_t uid, gid_t gid) {
     if (mkdir(path.c_str(), mode) == -1 && errno != EEXIST) {
         return false;
     }
 
-    auto dir = std::unique_ptr<DIR, decltype(&closedir)>(opendir(path.c_str()), closedir);
-
-    if (dir == NULL) {
-        PLOG(ERROR) << "opendir failed for " << path;
-        goto err;
-    }
-
-    struct dirent* dir_entry;
-    while ((dir_entry = readdir(dir.get()))) {
-        if (!strcmp("..", dir_entry->d_name)) {
-            continue;
-        }
-
-        std::string file_path = path + "/" + dir_entry->d_name;
-
-        if (lchown(file_path.c_str(), uid, gid) < 0) {
-            PLOG(ERROR) << "lchown failed for " << file_path;
-            goto err;
-        }
-
-        if (fchmodat(AT_FDCWD, file_path.c_str(), mode, AT_SYMLINK_NOFOLLOW) != 0) {
-            PLOG(ERROR) << "fchmodat failed for " << file_path;
-            goto err;
-        }
+    if (chown(path.c_str(), uid, gid) == -1) {
+        int saved_errno = errno;
+        rmdir(path.c_str());
+        errno = saved_errno;
+        return false;
     }
 
     return true;
-err:
-    int saved_errno = errno;
-    rmdir(path.c_str());
-    errno = saved_errno;
-
-    return false;
 }
 
 // Returns number of processes killed on success
@@ -344,9 +302,17 @@
 
 static int KillProcessGroup(uid_t uid, int initialPid, int signal, int retries,
                             int* max_processes) {
-    std::string hierarchy_root_path;
-    CgroupGetControllerPath(CGROUPV2_CONTROLLER_NAME, &hierarchy_root_path);
-    const char* cgroup = hierarchy_root_path.c_str();
+    std::string cpuacct_path;
+    std::string memory_path;
+
+    CgroupGetControllerPath("cpuacct", &cpuacct_path);
+    CgroupGetControllerPath("memory", &memory_path);
+    memory_path += "/apps";
+
+    const char* cgroup =
+            (!access(ConvertUidPidToPath(cpuacct_path.c_str(), uid, initialPid).c_str(), F_OK))
+                    ? cpuacct_path.c_str()
+                    : memory_path.c_str();
 
     std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
 
@@ -389,17 +355,7 @@
             LOG(INFO) << "Successfully killed process cgroup uid " << uid << " pid " << initialPid
                       << " in " << static_cast<int>(ms) << "ms";
         }
-
-        int err = RemoveProcessGroup(cgroup, uid, initialPid, retries);
-
-        if (isMemoryCgroupSupported() && UsePerAppMemcg()) {
-            std::string memory_path;
-            CgroupGetControllerPath("memory", &memory_path);
-            memory_path += "/apps";
-            if (RemoveProcessGroup(memory_path.c_str(), uid, initialPid, retries)) return -1;
-        }
-
-        return err;
+        return RemoveProcessGroup(cgroup, uid, initialPid);
     } else {
         if (retries > 0) {
             LOG(ERROR) << "Failed to kill process cgroup uid " << uid << " pid " << initialPid
@@ -418,7 +374,15 @@
     return KillProcessGroup(uid, initialPid, signal, 0 /*retries*/, max_processes);
 }
 
-static int createProcessGroupInternal(uid_t uid, int initialPid, std::string cgroup) {
+int createProcessGroup(uid_t uid, int initialPid, bool memControl) {
+    std::string cgroup;
+    if (isMemoryCgroupSupported() && (memControl || UsePerAppMemcg())) {
+        CgroupGetControllerPath("memory", &cgroup);
+        cgroup += "/apps";
+    } else {
+        CgroupGetControllerPath("cpuacct", &cgroup);
+    }
+
     auto uid_path = ConvertUidToPath(cgroup.c_str(), uid);
 
     if (!MkdirAndChown(uid_path, 0750, AID_SYSTEM, AID_SYSTEM)) {
@@ -444,27 +408,6 @@
     return ret;
 }
 
-int createProcessGroup(uid_t uid, int initialPid, bool memControl) {
-    std::string cgroup;
-
-    if (memControl && !UsePerAppMemcg()) {
-        PLOG(ERROR) << "service memory controls are used without per-process memory cgroup support";
-        return -EINVAL;
-    }
-
-    if (isMemoryCgroupSupported() && UsePerAppMemcg()) {
-        CgroupGetControllerPath("memory", &cgroup);
-        cgroup += "/apps";
-        int ret = createProcessGroupInternal(uid, initialPid, cgroup);
-        if (ret != 0) {
-            return ret;
-        }
-    }
-
-    CgroupGetControllerPath(CGROUPV2_CONTROLLER_NAME, &cgroup);
-    return createProcessGroupInternal(uid, initialPid, cgroup);
-}
-
 static bool SetProcessGroupValue(int tid, const std::string& attr_name, int64_t value) {
     if (!isMemoryCgroupSupported()) {
         PLOG(ERROR) << "Memcg is not mounted.";
diff --git a/libprocessgroup/profiles/Android.bp b/libprocessgroup/profiles/Android.bp
index a496237..2d7bb5a 100644
--- a/libprocessgroup/profiles/Android.bp
+++ b/libprocessgroup/profiles/Android.bp
@@ -123,9 +123,6 @@
         "cgroups.recovery.json",
         "task_profiles.json",
     ],
-    test_suites: [
-        "general-tests",
-    ],
 }
 
 cc_test {
diff --git a/libprocessgroup/profiles/TEST_MAPPING b/libprocessgroup/profiles/TEST_MAPPING
deleted file mode 100644
index 5ff4112..0000000
--- a/libprocessgroup/profiles/TEST_MAPPING
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "presubmit": [
-    {
-      "name": "libprocessgroup_proto_test",
-      "host": true
-    }
-  ]
-}
diff --git a/libprocessgroup/profiles/cgroups.json b/libprocessgroup/profiles/cgroups.json
index 962d2ba..5b7a28a 100644
--- a/libprocessgroup/profiles/cgroups.json
+++ b/libprocessgroup/profiles/cgroups.json
@@ -15,6 +15,11 @@
       "GID": "system"
     },
     {
+      "Controller": "cpuacct",
+      "Path": "/acct",
+      "Mode": "0555"
+    },
+    {
       "Controller": "cpuset",
       "Path": "/dev/cpuset",
       "Mode": "0755",
@@ -37,7 +42,7 @@
     "Controllers": [
       {
         "Controller": "freezer",
-        "Path": ".",
+        "Path": "freezer",
         "Mode": "0755",
         "UID": "system",
         "GID": "system"
diff --git a/libprocessgroup/profiles/cgroups.recovery.json b/libprocessgroup/profiles/cgroups.recovery.json
index 2c63c08..f0bf5fd 100644
--- a/libprocessgroup/profiles/cgroups.recovery.json
+++ b/libprocessgroup/profiles/cgroups.recovery.json
@@ -1,2 +1,9 @@
 {
+  "Cgroups": [
+    {
+      "Controller": "cpuacct",
+      "Path": "/acct",
+      "Mode": "0555"
+    }
+  ]
 }
diff --git a/libprocessgroup/profiles/task_profiles.json b/libprocessgroup/profiles/task_profiles.json
index 628098b..5b57bdd 100644
--- a/libprocessgroup/profiles/task_profiles.json
+++ b/libprocessgroup/profiles/task_profiles.json
@@ -46,7 +46,7 @@
       "File": "cpu.uclamp.latency_sensitive"
     },
     {
-      "Name": "Freezer",
+      "Name": "FreezerState",
       "Controller": "freezer",
       "File": "cgroup.freeze"
     }
@@ -70,11 +70,11 @@
       "Name": "Frozen",
       "Actions": [
         {
-          "Name": "SetAttribute",
+          "Name": "JoinCgroup",
           "Params":
           {
-            "Name": "Freezer",
-            "Value": "1"
+            "Controller": "freezer",
+            "Path": ""
           }
         }
       ]
@@ -83,11 +83,11 @@
       "Name": "Unfrozen",
       "Actions": [
         {
-          "Name": "SetAttribute",
+          "Name": "JoinCgroup",
           "Params":
           {
-            "Name": "Freezer",
-            "Value": "0"
+            "Controller": "freezer",
+            "Path": "../"
           }
         }
       ]
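A minimal sketch of how the reworked Frozen/Unfrozen profiles above would typically be applied from native code; the SetTaskProfiles() declaration and its default use_fd_cache argument are assumptions about libprocessgroup's public API:

    // Sketch only: assumes processgroup.h declares
    // bool SetTaskProfiles(int tid, const std::vector<std::string>& profiles,
    //                      bool use_fd_cache = false);
    #include <processgroup/processgroup.h>

    bool SetTaskFrozen(int tid, bool freeze) {
        // With this change the profiles migrate the task into the freezer cgroup
        // (Frozen) or back to its parent (Unfrozen) instead of writing a
        // cgroup.freeze attribute directly.
        return SetTaskProfiles(tid, {freeze ? "Frozen" : "Unfrozen"});
    }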
diff --git a/libsystem/OWNERS b/libsystem/OWNERS
index 4f800d4..9bda04c 100644
--- a/libsystem/OWNERS
+++ b/libsystem/OWNERS
@@ -1,8 +1,6 @@
 # graphics/composer
 adyabr@google.com
 lpy@google.com
-stoza@google.com
-vhau@google.com
 
 # camera
 etalvala@google.com
diff --git a/llkd/libllkd.cpp b/llkd/libllkd.cpp
index a24d900..9f3e218 100644
--- a/llkd/libllkd.cpp
+++ b/llkd/libllkd.cpp
@@ -962,7 +962,7 @@
     //
     // This alarm is effectively the live lock detection of llkd, as
     // we understandably can not monitor ourselves otherwise.
-    ::alarm(duration_cast<seconds>(llkTimeoutMs * 2).count());
+    ::alarm(duration_cast<seconds>(llkTimeoutMs * 2 * android::base::TimeoutMultiplier()).count());
 
     // kernel jiffy precision fastest acquisition
     static timespec last;
diff --git a/property_service/libpropertyinfoparser/Android.bp b/property_service/libpropertyinfoparser/Android.bp
index 2d7e9cb..8777896 100644
--- a/property_service/libpropertyinfoparser/Android.bp
+++ b/property_service/libpropertyinfoparser/Android.bp
@@ -8,7 +8,6 @@
     native_bridge_supported: true,
     srcs: ["property_info_parser.cpp"],
 
-    cpp_std: "experimental",
     cppflags: [
         "-Wall",
         "-Wextra",
diff --git a/property_service/libpropertyinfoserializer/Android.bp b/property_service/libpropertyinfoserializer/Android.bp
index aa02a3a..f91bc37 100644
--- a/property_service/libpropertyinfoserializer/Android.bp
+++ b/property_service/libpropertyinfoserializer/Android.bp
@@ -1,7 +1,6 @@
 cc_defaults {
     name: "propertyinfoserializer_defaults",
     host_supported: true,
-    cpp_std: "experimental",
     cppflags: [
         "-Wall",
         "-Wextra",
diff --git a/property_service/property_info_checker/Android.bp b/property_service/property_info_checker/Android.bp
index 65e660a..66f378a 100644
--- a/property_service/property_info_checker/Android.bp
+++ b/property_service/property_info_checker/Android.bp
@@ -2,7 +2,6 @@
     name: "property_info_checker",
     host_supported: true,
     static_executable: true,
-    cpp_std: "experimental",
     static_libs: [
         "libpropertyinfoserializer",
         "libpropertyinfoparser",
diff --git a/rootdir/init.rc b/rootdir/init.rc
index 03af4f3..3cabecc 100644
--- a/rootdir/init.rc
+++ b/rootdir/init.rc
@@ -722,6 +722,8 @@
     mkdir /data/misc/trace 0700 root root
     # create location to store surface and window trace files
     mkdir /data/misc/wmtrace 0700 system system
+    # create location to store accessibility trace files
+    mkdir /data/misc/a11ytrace 0700 system system
     # profile file layout
     mkdir /data/misc/profiles 0771 system system
     mkdir /data/misc/profiles/cur 0771 system system
@@ -1169,6 +1171,8 @@
     chmod 0773 /data/misc/trace
     # Give reads to anyone for the window trace folder on debug builds.
     chmod 0775 /data/misc/wmtrace
+    # Give reads to anyone for the accessibility trace folder on debug builds.
+    chmod 0775 /data/misc/a11ytrace
 
 on init && property:ro.debuggable=1
     start console
diff --git a/rootdir/ueventd.rc b/rootdir/ueventd.rc
index e9293b5..65e29c1 100644
--- a/rootdir/ueventd.rc
+++ b/rootdir/ueventd.rc
@@ -37,8 +37,6 @@
 /dev/tty                  0666   root       root
 /dev/random               0666   root       root
 /dev/urandom              0666   root       root
-# Make HW RNG readable by group system to let EntropyMixer read it.
-/dev/hw_random            0440   root       system
 /dev/ashmem*              0666   root       root
 /dev/binder               0666   root       root
 /dev/hwbinder             0666   root       root
diff --git a/shell_and_utilities/README.md b/shell_and_utilities/README.md
index d169d29..f339553 100644
--- a/shell_and_utilities/README.md
+++ b/shell_and_utilities/README.md
@@ -26,7 +26,8 @@
 because the toolbox implementations did have bugs fixed and options added
 over the years. Gingerbread's rm, for example, supported `-r`/`-R` but not
 `-f`. But this gives you an idea of what was available in any given release,
-and how usable it was likely to be.
+and how usable it was likely to be. (**Bold** marks where we switched to toybox
+or first added something to toybox.)
 
 Also note that in any given release `toybox` probably contains more
 commands than there are symlinks for in `/system/bin`. You can get the
@@ -118,18 +119,18 @@
 toolbox: getevent iftop ioctl log nandread newfs\_msdos ps prlimit
 sendevent start stop top
 
-toybox (0.7.0-ish): acpi base64 basename blockdev bzcat cal cat chcon chgrp chmod
-chown chroot cksum clear comm cmp cp cpio cut date df dirname dmesg
-dos2unix du echo env expand expr fallocate false find flock free
+toybox (0.7.0-ish): acpi **base64** basename blockdev bzcat cal cat chcon chgrp chmod
+chown chroot cksum clear comm cmp cp cpio cut date **df** dirname dmesg
+dos2unix **du** echo env expand expr fallocate false find **flock** free
 getenforce getprop groups head hostname hwclock id ifconfig inotifyd
-insmod ionice iorenice kill killall load\_policy ln logname losetup ls
-lsmod lsof lsusb md5sum mkdir mknod mkswap mktemp modinfo more mount
+insmod **ionice** **iorenice** kill **killall** load\_policy ln logname losetup **ls**
+lsmod **lsof** lsusb md5sum mkdir mknod mkswap mktemp modinfo more *mount*
 mountpoint mv netstat nice nl nohup od paste patch pgrep pidof pkill
-pmap printenv printf pwd readlink realpath renice restorecon rm rmdir
+pmap printenv printf pwd readlink realpath **renice** restorecon rm rmdir
 rmmod route runcon sed seq setenforce setprop setsid sha1sum sleep sort
 split stat strings swapoff swapon sync sysctl tac tail tar taskset tee
-time timeout touch tr true truncate tty ulimit umount uname uniq unix2dos
-uptime usleep vmstat wc which whoami xargs xxd yes
+time timeout touch tr true truncate **tty** **ulimit** umount uname uniq unix2dos
+**uptime** usleep vmstat wc which whoami xargs **xxd** yes
 
 
 ## Android 8.0 (Oreo)
@@ -141,19 +142,19 @@
 toolbox: getevent newfs\_msdos
 
 toybox (0.7.3-ish): acpi base64 basename blockdev cal cat chcon chgrp chmod chown
-chroot chrt cksum clear cmp comm cp cpio cut date df diff dirname dmesg
-dos2unix du echo env expand expr fallocate false file find flock free
-getenforce getprop groups gunzip gzip head hostname hwclock id ifconfig
-inotifyd insmod ionice iorenice kill killall ln load\_policy log logname
-losetup ls lsmod lsof lspci lsusb md5sum microcom mkdir mkfifo mknod
-mkswap mktemp modinfo modprobe more mount mountpoint mv netstat nice
-nl nohup od paste patch pgrep pidof pkill pmap printenv printf ps pwd
-readlink realpath renice restorecon rm rmdir rmmod runcon sed sendevent
-seq setenforce setprop setsid sha1sum sha224sum sha256sum sha384sum
-sha512sum sleep sort split start stat stop strings swapoff swapon sync
-sysctl tac tail tar taskset tee time timeout top touch tr true truncate
-tty ulimit umount uname uniq unix2dos uptime usleep uudecode uuencode
-vmstat wc which whoami xargs xxd yes zcat
+chroot chrt cksum clear cmp comm cp cpio cut date df **diff** dirname dmesg
+dos2unix du echo env expand expr fallocate false **file** find flock free
+getenforce getprop groups **gunzip** **gzip** head hostname hwclock id ifconfig
+inotifyd insmod ionice iorenice kill killall ln load\_policy **log** logname
+losetup ls lsmod lsof **lspci** lsusb md5sum **microcom** mkdir **mkfifo** mknod
+mkswap mktemp modinfo **modprobe** more mount mountpoint mv netstat nice
+nl nohup od paste patch pgrep pidof pkill pmap printenv printf **ps** pwd
+readlink realpath renice restorecon rm rmdir rmmod runcon sed **sendevent**
+seq setenforce setprop setsid sha1sum **sha224sum** **sha256sum** **sha384sum**
+**sha512sum** sleep sort split start stat stop strings swapoff swapon sync
+sysctl tac tail tar taskset tee time timeout **top** touch tr true truncate
+tty ulimit umount uname uniq unix2dos uptime usleep **uudecode** **uuencode**
+vmstat wc which whoami xargs xxd yes **zcat**
 
 
 ## Android 9.0 (Pie)
@@ -168,7 +169,7 @@
 
 toybox (0.7.6-ish): acpi base64 basename blockdev cal cat chcon chgrp chmod chown
 chroot chrt cksum clear cmp comm cp cpio cut date df diff dirname dmesg
-dos2unix du echo env expand expr fallocate false file find flock fmt free
+dos2unix du echo env expand expr fallocate false file find flock **fmt** free
 getenforce groups gunzip gzip head hostname hwclock id ifconfig inotifyd
 insmod ionice iorenice kill killall ln load\_policy log logname losetup ls
 lsmod lsof lspci lsusb md5sum microcom mkdir mkfifo mknod mkswap mktemp
@@ -176,7 +177,7 @@
 patch pgrep pidof pkill pmap printenv printf ps pwd readlink realpath
 renice restorecon rm rmdir rmmod runcon sed sendevent seq setenforce
 setprop setsid sha1sum sha224sum sha256sum sha384sum sha512sum sleep
-sort split start stat stop strings stty swapoff swapon sync sysctl tac
+sort split start stat stop strings **stty** swapoff swapon sync sysctl tac
 tail tar taskset tee time timeout top touch tr true truncate tty ulimit
 umount uname uniq unix2dos uptime usleep uudecode uuencode vmstat wc
 which whoami xargs xxd yes zcat
@@ -192,24 +193,24 @@
 
 toolbox: getevent getprop
 
-toybox (0.8.0-ish): acpi base64 basename bc blkid blockdev cal cat chattr chcon chgrp
+toybox (0.8.0-ish): acpi base64 basename **bc** **blkid** blockdev cal cat **chattr** chcon chgrp
 chmod chown chroot chrt cksum clear cmp comm cp cpio cut date dd df
-diff dirname dmesg dos2unix du echo egrep env expand expr fallocate
-false fgrep file find flock fmt free freeramdisk fsfreeze getconf
-getenforce getfattr grep groups gunzip gzip head help hostname hwclock
-i2cdetect i2cdump i2cget i2cset iconv id ifconfig inotifyd insmod
-install ionice iorenice iotop kill killall ln load\_policy log logname
-losetup ls lsattr lsmod lsof lspci lsusb makedevs md5sum microcom
+diff dirname dmesg dos2unix du echo **egrep** env expand expr fallocate
+false **fgrep** file find flock fmt free **freeramdisk** **fsfreeze** **getconf**
+getenforce **getfattr** grep groups gunzip gzip head **help** hostname hwclock
+**i2cdetect** **i2cdump** **i2cget** **i2cset** **iconv** id ifconfig inotifyd insmod
+**install** ionice iorenice **iotop** kill killall ln load\_policy log logname
+losetup ls **lsattr** lsmod lsof lspci lsusb **makedevs** md5sum microcom
 mkdir mkfifo mknod mkswap mktemp modinfo modprobe more mount mountpoint
-mv nbd-client nc netcat netstat nice nl nohup nproc nsenter od partprobe
-paste patch pgrep pidof ping ping6 pivot\_root pkill pmap printenv
-printf prlimit ps pwd pwdx readlink realpath renice restorecon rev
-rfkill rm rmdir rmmod runcon sed sendevent seq setenforce setfattr
+mv **nbd-client** **nc** **netcat** netstat nice nl nohup **nproc** **nsenter** od **partprobe**
+paste patch pgrep pidof **ping** **ping6** **pivot\_root** pkill pmap printenv
+printf **prlimit** ps pwd **pwdx** readlink realpath renice restorecon **rev**
+**rfkill** rm rmdir rmmod runcon sed sendevent seq setenforce **setfattr**
 setprop setsid sha1sum sha224sum sha256sum sha384sum sha512sum sleep
 sort split start stat stop strings stty swapoff swapon sync sysctl
-tac tail tar taskset tee time timeout top touch tr traceroute traceroute6
-true truncate tty tunctl ulimit umount uname uniq unix2dos unlink
-unshare uptime usleep uudecode uuencode uuidgen vconfig vmstat watch
+tac tail tar taskset tee time timeout top touch tr **traceroute** **traceroute6**
+true truncate tty **tunctl** ulimit umount uname uniq unix2dos **unlink**
+**unshare** uptime usleep uudecode uuencode **uuidgen** **vconfig** vmstat **watch**
 wc which whoami xargs xxd yes zcat
 
 ## Android 11 ("R")
@@ -225,21 +226,53 @@
 toolbox: getevent getprop setprop start stop
 
 toybox (0.8.3-ish): acpi base64 basename blkid blockdev cal cat chattr chcon chgrp chmod
-chown chroot chrt cksum clear cmp comm cp cpio cut date dd devmem
+chown chroot chrt cksum clear cmp comm cp cpio cut date dd **devmem**
 df diff dirname dmesg dos2unix du echo egrep env expand expr fallocate
-false fgrep file find flock fmt free freeramdisk fsfreeze fsync getconf
-getenforce getfattr getopt grep groups gunzip gzip head help hostname
+false fgrep file find flock fmt free freeramdisk fsfreeze **fsync** getconf
+getenforce getfattr **getopt** grep groups gunzip gzip head help hostname
 hwclock i2cdetect i2cdump i2cget i2cset iconv id ifconfig inotifyd
 insmod install ionice iorenice iotop kill killall ln load\_policy log
 logname losetup ls lsattr lsmod lsof lspci lsusb makedevs md5sum microcom
 mkdir mkfifo mknod mkswap mktemp modinfo modprobe more mount mountpoint
 mv nbd-client nc netcat netstat nice nl nohup nproc nsenter od partprobe
 paste patch pgrep pidof ping ping6 pivot\_root pkill pmap printenv
-printf prlimit ps pwd pwdx readelf readlink realpath renice restorecon
+printf prlimit ps pwd pwdx **readelf** readlink realpath renice restorecon
 rev rfkill rm rmdir rmmod runcon sed sendevent seq setenforce setfattr
 setsid sha1sum sha224sum sha256sum sha384sum sha512sum sleep sort
 split stat strings stty swapoff swapon sync sysctl tac tail tar taskset
 tee time timeout top touch tr traceroute traceroute6 true truncate
 tty tunctl ulimit umount uname uniq unix2dos unlink unshare uptime
-usleep uudecode uuencode uuidgen vconfig vi vmstat watch wc which
+usleep uudecode uuencode uuidgen vconfig **vi** vmstat watch wc which
 whoami xargs xxd yes zcat
+
+## Android ("S")
+
+BSD: fsck\_msdos newfs\_msdos
+
+bzip2: bzcat bzip2 bunzip2
+
+gavinhoward/bc: bc
+
+one-true-awk: awk
+
+toolbox: getevent getprop setprop start stop
+
+toybox (0.8.4-ish): **[** acpi base64 basename **blkdiscard** blkid blockdev cal cat chattr chcon
+chgrp chmod chown chroot chrt cksum clear cmp comm cp cpio cut date
+dd devmem df diff dirname dmesg dos2unix du echo egrep env expand
+expr fallocate false fgrep file find flock fmt free freeramdisk fsfreeze
+fsync getconf getenforce getfattr getopt grep groups gunzip gzip head
+help hostname hwclock i2cdetect i2cdump i2cget i2cset iconv id ifconfig
+inotifyd insmod install ionice iorenice iotop kill killall ln load\_policy
+log logname losetup ls lsattr lsmod lsof lspci lsusb makedevs md5sum
+microcom mkdir mkfifo mknod mkswap mktemp modinfo modprobe more mount
+mountpoint mv nbd-client nc netcat netstat nice nl nohup nproc nsenter
+od partprobe paste patch pgrep pidof ping ping6 pivot\_root pkill pmap
+printenv printf prlimit ps pwd pwdx readelf readlink realpath renice
+restorecon rev rfkill rm rmdir rmmod **rtcwake** runcon sed sendevent
+seq setenforce setfattr setsid sha1sum sha224sum sha256sum sha384sum
+sha512sum sleep sort split stat strings stty swapoff swapon sync sysctl
+tac tail tar taskset tee **test** time timeout top touch tr traceroute
+traceroute6 true truncate tty tunctl ulimit umount uname uniq unix2dos
+unlink unshare uptime usleep uudecode uuencode uuidgen vconfig vi
+vmstat watch wc which whoami xargs xxd yes zcat
diff --git a/storaged/include/storaged.h b/storaged/include/storaged.h
index 6f92048..79b5d41 100644
--- a/storaged/include/storaged.h
+++ b/storaged/include/storaged.h
@@ -86,6 +86,7 @@
     sp<android::hardware::health::V2_0::IHealth> health;
     unique_ptr<storage_info_t> storage_info;
     static const uint32_t current_version;
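+    // Serializes access to proto_loaded and the proto load/flush paths.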
+    Mutex proto_lock;
     unordered_map<userid_t, bool> proto_loaded;
     void load_proto(userid_t user_id);
     char* prepare_proto(userid_t user_id, StoragedProto* proto);
diff --git a/storaged/storaged.cpp b/storaged/storaged.cpp
index 573b8c5..b7aa89f 100644
--- a/storaged/storaged.cpp
+++ b/storaged/storaged.cpp
@@ -162,6 +162,8 @@
 }
 
 void storaged_t::add_user_ce(userid_t user_id) {
+    Mutex::Autolock _l(proto_lock);
+
     if (!proto_loaded[user_id]) {
         load_proto(user_id);
         proto_loaded[user_id] = true;
@@ -169,6 +171,8 @@
 }
 
 void storaged_t::remove_user_ce(userid_t user_id) {
+    Mutex::Autolock _l(proto_lock);
+
     proto_loaded[user_id] = false;
     mUidm.clear_user_history(user_id);
     RemoveFileIfExists(proto_path(user_id), nullptr);
@@ -298,6 +302,8 @@
 }
 
 void storaged_t::flush_protos(unordered_map<int, StoragedProto>* protos) {
+    Mutex::Autolock _l(proto_lock);
+
     for (auto& it : *protos) {
         /*
          * Don't flush proto if we haven't attempted to load it from file.
diff --git a/toolbox/Android.bp b/toolbox/Android.bp
index 4ca5f5a..3207824 100644
--- a/toolbox/Android.bp
+++ b/toolbox/Android.bp
@@ -19,7 +19,6 @@
 cc_defaults {
     name: "toolbox_binary_defaults",
     defaults: ["toolbox_defaults"],
-    cpp_std: "experimental",
     srcs: [
         "toolbox.c",
         "getevent.c",
diff --git a/trusty/apploader/Android.bp b/trusty/apploader/Android.bp
new file mode 100644
index 0000000..7e97cb8
--- /dev/null
+++ b/trusty/apploader/Android.bp
@@ -0,0 +1,36 @@
+//
+// Copyright (C) 2020 The Android Open-Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_binary {
+    name: "trusty_apploader",
+    vendor: true,
+
+    srcs: [
+        "apploader.cpp",
+    ],
+
+    shared_libs: [
+        "libbase",
+        "libc",
+        "liblog",
+        "libtrusty",
+        "libdmabufheap",
+    ],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+}
diff --git a/trusty/apploader/apploader.cpp b/trusty/apploader/apploader.cpp
new file mode 100644
index 0000000..5d5d882
--- /dev/null
+++ b/trusty/apploader/apploader.cpp
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TrustyAppLoader"
+
+#include <BufferAllocator/BufferAllocator.h>
+#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/sendfile.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <trusty/tipc.h>
+#include <unistd.h>
+#include <algorithm>
+#include <string>
+
+#include "apploader_ipc.h"
+
+using android::base::unique_fd;
+using std::string;
+
+constexpr const char kTrustyDefaultDeviceName[] = "/dev/trusty-ipc-dev0";
+
+static const char* dev_name = kTrustyDefaultDeviceName;
+
+static const char* _sopts = "hD:";
+static const struct option _lopts[] = {
+        {"help", no_argument, 0, 'h'},
+        {"dev", required_argument, 0, 'D'},
+        {0, 0, 0, 0},
+};
+
+static const char* usage =
+        "Usage: %s [options] package-file\n"
+        "\n"
+        "options:\n"
+        "  -h, --help            prints this message and exit\n"
+        "  -D, --dev name        Trusty device name\n"
+        "\n";
+
+static void print_usage_and_exit(const char* prog, int code) {
+    fprintf(stderr, usage, prog);
+    exit(code);
+}
+
+static void parse_options(int argc, char** argv) {
+    int c;
+    int oidx = 0;
+
+    while (1) {
+        c = getopt_long(argc, argv, _sopts, _lopts, &oidx);
+        if (c == -1) {
+            break; /* done */
+        }
+
+        switch (c) {
+            case 'h':
+                print_usage_and_exit(argv[0], EXIT_SUCCESS);
+                break;
+
+            case 'D':
+                dev_name = strdup(optarg);
+                break;
+
+            default:
+                print_usage_and_exit(argv[0], EXIT_FAILURE);
+        }
+    }
+}
+
+static unique_fd read_file(const char* file_name, off64_t* out_file_size) {
+    int rc;
+    long page_size = sysconf(_SC_PAGESIZE);
+    off64_t file_size, file_page_offset, file_page_size;
+    struct stat64 st;
+
+    unique_fd file_fd(TEMP_FAILURE_RETRY(open(file_name, O_RDONLY)));
+    if (!file_fd.ok()) {
+        fprintf(stderr, "Error opening file '%s': %s\n", file_name, strerror(errno));
+        return {};
+    }
+
+    rc = fstat64(file_fd, &st);
+    if (rc < 0) {
+        fprintf(stderr, "Error calling stat on file '%s': %s\n", file_name, strerror(errno));
+        return {};
+    }
+
+    assert(st.st_size >= 0);
+    file_size = st.st_size;
+
+    /* The dmabuf size needs to be a multiple of the page size */
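+    /* (e.g. a 5000-byte package with 4096-byte pages rounds up to an 8192-byte dmabuf) */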
+    file_page_offset = file_size & (page_size - 1);
+    if (file_page_offset) {
+        file_page_offset = page_size - file_page_offset;
+    }
+    if (__builtin_add_overflow(file_size, file_page_offset, &file_page_size)) {
+        fprintf(stderr, "Failed to page-align file size\n");
+        return {};
+    }
+
+    BufferAllocator alloc;
+    unique_fd dmabuf_fd(alloc.Alloc(kDmabufSystemHeapName, file_page_size));
+    if (!dmabuf_fd.ok()) {
+        fprintf(stderr, "Error creating dmabuf: %d\n", dmabuf_fd.get());
+        return dmabuf_fd;
+    }
+
+    void* shm = mmap(0, file_page_size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd, 0);
+    if (shm == MAP_FAILED) {
+        return {};
+    }
+
+    off64_t file_offset = 0;
+    while (file_offset < file_size) {
+        ssize_t num_read = TEMP_FAILURE_RETRY(
+                pread(file_fd, (char*)shm + file_offset, file_size - file_offset, file_offset));
+
+        if (num_read < 0) {
+            fprintf(stderr, "Error reading package file '%s': %s\n", file_name, strerror(errno));
+            break;
+        }
+
+        if (num_read == 0) {
+            fprintf(stderr, "Unexpected end of file '%s'\n", file_name);
+            break;
+        }
+
+        file_offset += (off64_t)num_read;
+    }
+
+    munmap(shm, file_page_size);
+
+    if (file_offset < file_size) {
+        return {};
+    }
+
+    assert(file_offset == file_size);
+    if (out_file_size) {
+        *out_file_size = file_size;
+    }
+
+    return dmabuf_fd;
+}
+
+static ssize_t send_load_message(int tipc_fd, int package_fd, off64_t package_size) {
+    struct apploader_header hdr = {
+            .cmd = APPLOADER_CMD_LOAD_APPLICATION,
+    };
+    struct apploader_load_app_req req = {
+            .package_size = static_cast<uint64_t>(package_size),
+    };
+    struct iovec tx[2] = {{&hdr, sizeof(hdr)}, {&req, sizeof(req)}};
+    struct trusty_shm shm = {
+            .fd = package_fd,
+            .transfer = TRUSTY_SHARE,
+    };
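+    /* Send the header and request as two iovecs and share the dmabuf with Trusty. */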
+    return tipc_send(tipc_fd, tx, 2, &shm, 1);
+}
+
+static ssize_t read_response(int tipc_fd) {
+    struct apploader_resp resp;
+    ssize_t rc = read(tipc_fd, &resp, sizeof(resp));
+    if (rc < 0) {
+        fprintf(stderr, "Failed to read response: %zd\n", rc);
+        return rc;
+    }
+
+    if (rc < static_cast<ssize_t>(sizeof(resp))) {
+        fprintf(stderr, "Not enough data in response: %zd\n", rc);
+        return -EIO;
+    }
+
+    if (resp.hdr.cmd != (APPLOADER_CMD_LOAD_APPLICATION | APPLOADER_RESP_BIT)) {
+        fprintf(stderr, "Invalid command in response: %u\n", resp.hdr.cmd);
+        return -EINVAL;
+    }
+
+    switch (resp.error) {
+        case APPLOADER_NO_ERROR:
+            break;
+        case APPLOADER_ERR_UNKNOWN_CMD:
+            fprintf(stderr, "Error: unknown command\n");
+            break;
+        case APPLOADER_ERR_INVALID_CMD:
+            fprintf(stderr, "Error: invalid command arguments\n");
+            break;
+        case APPLOADER_ERR_NO_MEMORY:
+            fprintf(stderr, "Error: out of Trusty memory\n");
+            break;
+        case APPLOADER_ERR_VERIFICATION_FAILED:
+            fprintf(stderr, "Error: failed to verify the package\n");
+            break;
+        case APPLOADER_ERR_LOADING_FAILED:
+            fprintf(stderr, "Error: failed to load the package\n");
+            break;
+        case APPLOADER_ERR_ALREADY_EXISTS:
+            fprintf(stderr, "Error: application already exists\n");
+            break;
+        case APPLOADER_ERR_INTERNAL:
+            fprintf(stderr, "Error: internal apploader error\n");
+            break;
+        default:
+            fprintf(stderr, "Unrecognized error: %u\n", resp.error);
+            break;
+    }
+
+    return static_cast<ssize_t>(resp.error);
+}
+
+static ssize_t send_app_package(const char* package_file_name) {
+    ssize_t rc = 0;
+    int tipc_fd = -1;
+    off64_t package_size;
+
+    unique_fd package_fd = read_file(package_file_name, &package_size);
+    if (!package_fd.ok()) {
+        rc = -1;
+        goto err_read_file;
+    }
+
+    tipc_fd = tipc_connect(dev_name, APPLOADER_PORT);
+    if (tipc_fd < 0) {
+        fprintf(stderr, "Failed to connect to Trusty app loader: %s\n", strerror(-tipc_fd));
+        rc = tipc_fd;
+        goto err_tipc_connect;
+    }
+
+    rc = send_load_message(tipc_fd, package_fd, package_size);
+    if (rc < 0) {
+        fprintf(stderr, "Failed to send package: %zd\n", rc);
+        goto err_send;
+    }
+
+    rc = read_response(tipc_fd);
+
+err_send:
+    tipc_close(tipc_fd);
+err_tipc_connect:
+err_read_file:
+    return rc;
+}
+
+int main(int argc, char** argv) {
+    parse_options(argc, argv);
+    if (optind + 1 != argc) {
+        print_usage_and_exit(argv[0], EXIT_FAILURE);
+    }
+
+    int rc = send_app_package(argv[optind]);
+    return rc == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/trusty/apploader/apploader_ipc.h b/trusty/apploader/apploader_ipc.h
new file mode 100644
index 0000000..d8c915e
--- /dev/null
+++ b/trusty/apploader/apploader_ipc.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+
+#define APPLOADER_PORT "com.android.trusty.apploader"
+
+enum apploader_command : uint32_t {
+    APPLOADER_REQ_SHIFT = 1,
+    APPLOADER_RESP_BIT = 1,
+
+    APPLOADER_CMD_LOAD_APPLICATION = (0 << APPLOADER_REQ_SHIFT),
+    APPLOADER_CMD_GET_VERSION = (1 << APPLOADER_REQ_SHIFT),
+    APPLOADER_CMD_UNLOAD_APPLICATION = (2 << APPLOADER_REQ_SHIFT),
+};
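+
+/*
+ * Requests leave bit 0 clear; a response echoes the request command with
+ * APPLOADER_RESP_BIT set, e.g. APPLOADER_CMD_LOAD_APPLICATION | APPLOADER_RESP_BIT.
+ */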
+
+/**
+ * enum apploader_error - error codes for apploader
+ * @APPLOADER_NO_ERROR:                 no error
+ * @APPLOADER_ERR_UNKNOWN_CMD:          unknown or not implemented command
+ * @APPLOADER_ERR_INVALID_CMD:          invalid arguments or inputs passed to
+ *                                      command
+ * @APPLOADER_ERR_NO_MEMORY:            failed to allocate memory
+ * @APPLOADER_ERR_VERIFICATION_FAILED:  failed to verify input application
+ *                                      package for any reason, e.g., signature
+ *                                      verification failed
+ * @APPLOADER_ERR_LOADING_FAILED:       Trusty kernel or apploader service
+ *                                      failed to load application
+ * @APPLOADER_ERR_ALREADY_EXISTS:       application has already been loaded
+ * @APPLOADER_ERR_INTERNAL:             miscellaneous or internal apploader
+ *                                      error not covered by the above
+ */
+enum apploader_error : uint32_t {
+    APPLOADER_NO_ERROR = 0,
+    APPLOADER_ERR_UNKNOWN_CMD,
+    APPLOADER_ERR_INVALID_CMD,
+    APPLOADER_ERR_NO_MEMORY,
+    APPLOADER_ERR_VERIFICATION_FAILED,
+    APPLOADER_ERR_LOADING_FAILED,
+    APPLOADER_ERR_ALREADY_EXISTS,
+    APPLOADER_ERR_INTERNAL,
+};
+
+/**
+ * apploader_header - Serial header for communicating with apploader
+ * @cmd: the command; one of &enum apploader_command values.
+ */
+struct apploader_header {
+    uint32_t cmd;
+} __packed;
+
+/**
+ * apploader_load_app_req - Serial arguments for LOAD_APPLICATION command
+ * @package_size: size of the application package.
+ *
+ * Load an application from a given memory region. The request message also
+ * carries a handle to a shared-memory buffer (a dmabuf in this client) that
+ * holds the application package.
+ *
+ * The response is a &struct apploader_resp with the error code or
+ * %APPLOADER_NO_ERROR on success.
+ */
+struct apploader_load_app_req {
+    uint64_t package_size;
+} __packed;
+
+/**
+ * apploader_resp - Common header for all apploader responses
+ * @hdr: header with command value.
+ * @error: error code returned by peer; one of &enum apploader_error values.
+ *
+ * This structure is followed by the response-specific payload, if the command
+ * has one.
+ */
+struct apploader_resp {
+    struct apploader_header hdr;
+    uint32_t error;
+} __packed;
diff --git a/trusty/trusty-base.mk b/trusty/trusty-base.mk
index fd8daa8..12521b0 100644
--- a/trusty/trusty-base.mk
+++ b/trusty/trusty-base.mk
@@ -24,7 +24,8 @@
 
 PRODUCT_PACKAGES += \
 	android.hardware.keymaster@4.0-service.trusty \
-	android.hardware.gatekeeper@1.0-service.trusty
+	android.hardware.gatekeeper@1.0-service.trusty \
+	trusty_apploader
 
 PRODUCT_PROPERTY_OVERRIDES += \
 	ro.hardware.keystore=trusty \