Merge "Build: Include lock-inversion agent in debug builds"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 1c02da6..757c513 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -617,6 +617,9 @@
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/super.img)
 
 $(call add-clean-step, find $(PRODUCT_OUT) -type f -name "generated_*_image_info.txt" -print0 | xargs -0 rm -f)
+
+# Clean up libicuuc.so and libicui18n.so
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*/libicu*)
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index dbb4b82..e08b56b 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -33,11 +33,6 @@
 PRODUCT_PACKAGES += \
     ext \
 
-# Libcore ICU. TODO(b/124218500): Remove them explicitly when the bug is resolved.
-PRODUCT_PACKAGES += \
-    libicui18n \
-    libicuuc \
-
 # Android Runtime APEX module.
 PRODUCT_PACKAGES += com.android.runtime
 PRODUCT_HOST_PACKAGES += com.android.runtime
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
new file mode 100644
index 0000000..b5ae009
--- /dev/null
+++ b/tools/releasetools/Android.bp
@@ -0,0 +1,72 @@
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+python_defaults {
+    name: "releasetools_test_defaults",
+    version: {
+        py2: {
+            enabled: true,
+            embedded_launcher: false,
+        },
+        py3: {
+            enabled: false,
+        },
+    },
+}
+
+python_library_host {
+    name: "releasetools_lib",
+    defaults: ["releasetools_test_defaults"],
+    srcs: [
+        "add_img_to_target_files.py",
+        "apex_utils.py",
+        "blockimgdiff.py",
+        "build_image.py",
+        "build_super_image.py",
+        "check_ota_package_signature.py",
+        "check_target_files_signatures.py",
+        "common.py",
+        "edify_generator.py",
+        "img_from_target_files.py",
+        "make_recovery_patch.py",
+        "merge_target_files.py",
+        "ota_from_target_files.py",
+        "ota_package_parser.py",
+        "rangelib.py",
+        "sign_target_files_apks.py",
+        "sparse_img.py",
+        "target_files_diff.py",
+        "validate_target_files.py",
+        "verity_utils.py",
+    ],
+}
+
+python_test_host {
+    name: "releasetools_test",
+    defaults: ["releasetools_test_defaults"],
+    main: "test_utils.py",
+    srcs: [
+        "test_*.py",
+    ],
+    libs: [
+        "releasetools_lib",
+    ],
+    data: [
+        "testdata/*",
+    ],
+    required: [
+        "otatools",
+    ],
+    test_suites: ["general-tests"],
+}
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index b7c33f5..b23eef1 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -187,6 +187,83 @@
       fd.write(data)
 
 
+class FileImage(Image):
+  """An image wrapped around a raw image file."""
+
+  def __init__(self, path, hashtree_info_generator=None):
+    self.path = path
+    self.blocksize = 4096
+    self._file_size = os.path.getsize(self.path)
+    self._file = open(self.path, 'rb')
+
+    if self._file_size % self.blocksize != 0:
+      raise ValueError("Size of file %s must be a multiple of %d bytes, but "
+                       "is %d" % (self.path, self.blocksize, self._file_size))
+
+    self.total_blocks = self._file_size // self.blocksize
+    self.care_map = RangeSet(data=(0, self.total_blocks))
+    self.clobbered_blocks = RangeSet()
+    self.extended = RangeSet()
+
+    self.generator_lock = threading.Lock()
+
+    self.hashtree_info = None
+    if hashtree_info_generator:
+      self.hashtree_info = hashtree_info_generator.Generate(self)
+
+    zero_blocks = []
+    nonzero_blocks = []
+    reference = '\0' * self.blocksize
+
+    for i in range(self.total_blocks):
+      d = self._file.read(self.blocksize)
+      if d == reference:
+        zero_blocks.append(i)
+        zero_blocks.append(i+1)
+      else:
+        nonzero_blocks.append(i)
+        nonzero_blocks.append(i+1)
+
+    assert zero_blocks or nonzero_blocks
+
+    self.file_map = {}
+    if zero_blocks:
+      self.file_map["__ZERO"] = RangeSet(data=zero_blocks)
+    if nonzero_blocks:
+      self.file_map["__NONZERO"] = RangeSet(data=nonzero_blocks)
+    if self.hashtree_info:
+      self.file_map["__HASHTREE"] = self.hashtree_info.hashtree_range
+
+  def __del__(self):
+    self._file.close()
+
+  def _GetRangeData(self, ranges):
+    # Use a lock to protect the generator so that we will not run two
+    # instances of this generator on the same object simultaneously.
+    with self.generator_lock:
+      for s, e in ranges:
+        self._file.seek(s * self.blocksize)
+        for _ in range(s, e):
+          yield self._file.read(self.blocksize)
+
+  def RangeSha1(self, ranges):
+    h = sha1()
+    for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
+      h.update(data)
+    return h.hexdigest()
+
+  def ReadRangeSet(self, ranges):
+    return list(self._GetRangeData(ranges))
+
+  def TotalSha1(self, include_clobbered_blocks=False):
+    assert not self.clobbered_blocks
+    return self.RangeSha1(self.care_map)
+
+  def WriteRangeDataToFd(self, ranges, fd):
+    for data in self._GetRangeData(ranges): # pylint: disable=not-an-iterable
+      fd.write(data)
+
+
 class Transfer(object):
   def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, tgt_sha1,
                src_sha1, style, by_id):
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 171c794..3e2a113 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -824,6 +824,77 @@
   return tmp
 
 
+def GetUserImage(which, tmpdir, input_zip,
+                 info_dict=None,
+                 allow_shared_blocks=None,
+                 hashtree_info_generator=None,
+                 reset_file_map=False):
+  """Returns an Image object suitable for passing to BlockImageDiff.
+
+  This function loads the specified image from the given path. If the image is
+  sparse, it also performs additional processing for OTA purposes. For
+  example, it always adds block 0 to the clobbered blocks list. It also
+  detects files that cannot be reconstructed from the block list, for which
+  imgdiff should not be applied.
+
+  Args:
+    which: The partition name.
+    tmpdir: The directory that contains the prebuilt image and block map file.
+    input_zip: The target-files ZIP archive.
+    info_dict: The dict to be looked up for relevant info.
+    allow_shared_blocks: If image is sparse, whether having shared blocks is
+        allowed. If None, it is looked up from info_dict.
+    hashtree_info_generator: If present and image is sparse, generates the
+        hashtree_info for this sparse image.
+    reset_file_map: If True and image is sparse, reset the file map before
+        returning the image.
+  Returns:
+    An Image object. If it is a sparse image and reset_file_map is False, the
+    image will have file_map info loaded.
+  """
+  if info_dict is None:
+    info_dict = LoadInfoDict(input_zip)
+
+  is_sparse = info_dict.get("extfs_sparse_flag")
+
+  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
+  # shared blocks (i.e. some blocks will show up in multiple files' block
+  # list). We can only allocate such shared blocks to the first "owner", and
+  # disable imgdiff for all later occurrences.
+  if allow_shared_blocks is None:
+    allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"
+
+  if is_sparse:
+    img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
+                         hashtree_info_generator)
+    if reset_file_map:
+      img.ResetFileMap()
+    return img
+  else:
+    return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
+
+
+def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
+  """Returns a Image object suitable for passing to BlockImageDiff.
+
+  This function loads the specified non-sparse image from the given path.
+
+  Args:
+    which: The partition name.
+    tmpdir: The directory that contains the prebuilt image and block map file.
+  Returns:
+    An Image object.
+  """
+  path = os.path.join(tmpdir, "IMAGES", which + ".img")
+  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
+
+  # The image and map files must have been created prior to calling
+  # ota_from_target_files.py (since LMP).
+  assert os.path.exists(path) and os.path.exists(mappath)
+
+  return blockimgdiff.FileImage(
+      path, hashtree_info_generator=hashtree_info_generator)
+
 def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
                    hashtree_info_generator=None):
   """Returns a SparseImage object suitable for passing to BlockImageDiff.
@@ -2068,7 +2139,7 @@
 
 
 DataImage = blockimgdiff.DataImage
-
+EmptyImage = blockimgdiff.EmptyImage
 
 # map recovery.fstab's fs_types to mount/format "partition types"
 PARTITION_TYPES = {
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index ad40bd4..dd3e190 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -917,17 +917,14 @@
 
   script.ShowProgress(system_progress, 0)
 
-  # See the notes in WriteBlockIncrementalOTAPackage().
-  allow_shared_blocks = target_info.get('ext4_share_dup_blocks') == "true"
-
   def GetBlockDifference(partition):
     # Full OTA is done as an "incremental" against an empty source image. This
     # has the effect of writing new data from the package to the entire
     # partition, but lets us reuse the updater code that writes incrementals to
     # do it.
-    tgt = common.GetSparseImage(partition, OPTIONS.input_tmp, input_zip,
-                                allow_shared_blocks)
-    tgt.ResetFileMap()
+    tgt = common.GetUserImage(partition, OPTIONS.input_tmp, input_zip,
+                              info_dict=target_info,
+                              reset_file_map=True)
     diff = common.BlockDifference(partition, tgt, src=None)
     return diff
 
@@ -1512,8 +1509,10 @@
   device_specific = common.DeviceSpecificParams(
       source_zip=source_zip,
       source_version=source_api_version,
+      source_tmp=OPTIONS.source_tmp,
       target_zip=target_zip,
       target_version=target_api_version,
+      target_tmp=OPTIONS.target_tmp,
       output_zip=output_zip,
       script=script,
       metadata=metadata,
@@ -1529,20 +1528,20 @@
   target_recovery = common.GetBootableImage(
       "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
 
-  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
-  # shared blocks (i.e. some blocks will show up in multiple files' block
-  # list). We can only allocate such shared blocks to the first "owner", and
-  # disable imgdiff for all later occurrences.
+  # See the notes in common.GetUserImage().
   allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
                          target_info.get('ext4_share_dup_blocks') == "true")
-  system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip,
-                                     allow_shared_blocks)
+  system_src = common.GetUserImage("system", OPTIONS.source_tmp, source_zip,
+                                   info_dict=source_info,
+                                   allow_shared_blocks=allow_shared_blocks)
 
   hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
       "system", 4096, target_info)
-  system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip,
-                                     allow_shared_blocks,
-                                     hashtree_info_generator)
+  system_tgt = common.GetUserImage("system", OPTIONS.target_tmp, target_zip,
+                                   info_dict=target_info,
+                                   allow_shared_blocks=allow_shared_blocks,
+                                   hashtree_info_generator=
+                                   hashtree_info_generator)
 
   blockimgdiff_version = max(
       int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
@@ -1567,13 +1566,16 @@
   if HasVendorPartition(target_zip):
     if not HasVendorPartition(source_zip):
       raise RuntimeError("can't generate incremental that adds /vendor")
-    vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip,
-                                       allow_shared_blocks)
+    vendor_src = common.GetUserImage("vendor", OPTIONS.source_tmp, source_zip,
+                                     info_dict=source_info,
+                                     allow_shared_blocks=allow_shared_blocks)
     hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
         "vendor", 4096, target_info)
-    vendor_tgt = common.GetSparseImage(
-        "vendor", OPTIONS.target_tmp, target_zip, allow_shared_blocks,
-        hashtree_info_generator)
+    vendor_tgt = common.GetUserImage(
+        "vendor", OPTIONS.target_tmp, target_zip,
+        info_dict=target_info,
+        allow_shared_blocks=allow_shared_blocks,
+        hashtree_info_generator=hashtree_info_generator)
 
     # Check first block of vendor partition for remount R/W only if
     # disk type is ext4
diff --git a/tools/releasetools/test_apex_utils.py b/tools/releasetools/test_apex_utils.py
index 2f8ee49..76d4d58 100644
--- a/tools/releasetools/test_apex_utils.py
+++ b/tools/releasetools/test_apex_utils.py
@@ -56,8 +56,10 @@
 
   def test_SignApexPayload_withSignerHelper(self):
     payload_file = self._GetTestPayload()
+    signing_helper = os.path.join(self.testdata_dir, 'signing_helper.sh')
+    os.chmod(signing_helper, 0o700)
     payload_signer_args = '--signing_helper_with_files {}'.format(
-        os.path.join(self.testdata_dir, 'signing_helper.sh'))
+        signing_helper)
     apex_utils.SignApexPayload(
         payload_file,
         self.payload_key,
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index 1aabaa2..b6d47d4 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -14,9 +14,13 @@
 # limitations under the License.
 #
 
+import os
+from hashlib import sha1
+
 import common
 from blockimgdiff import (
-    BlockImageDiff, DataImage, EmptyImage, HeapItem, ImgdiffStats, Transfer)
+    BlockImageDiff, DataImage, EmptyImage, FileImage, HeapItem, ImgdiffStats,
+    Transfer)
 from rangelib import RangeSet
 from test_utils import ReleaseToolsTestCase
 
@@ -264,7 +268,42 @@
 
 
 class DataImageTest(ReleaseToolsTestCase):
-    def test_read_range_set(self):
-        data = "file" + ('\0' * 4092)
-        image = DataImage(data)
-        self.assertEqual(data, "".join(image.ReadRangeSet(image.care_map)))
+  def test_read_range_set(self):
+    data = "file" + ('\0' * 4092)
+    image = DataImage(data)
+    self.assertEqual(data, "".join(image.ReadRangeSet(image.care_map)))
+
+
+class FileImageTest(ReleaseToolsTestCase):
+  def setUp(self):
+    self.file_path = common.MakeTempFile()
+    self.data = os.urandom(4096 * 4)
+    with open(self.file_path, 'wb') as f:
+      f.write(self.data)
+    self.file = FileImage(self.file_path)
+
+  def test_totalsha1(self):
+    self.assertEqual(sha1(self.data).hexdigest(), self.file.TotalSha1())
+
+  def test_ranges(self):
+    blocksize = self.file.blocksize
+    for s in range(4):
+      for e in range(s, 4):
+        expected_data = self.data[s * blocksize : e * blocksize]
+
+        rs = RangeSet([s, e])
+        data = "".join(self.file.ReadRangeSet(rs))
+        self.assertEqual(expected_data, data)
+
+        sha1sum = self.file.RangeSha1(rs)
+        self.assertEqual(sha1(expected_data).hexdigest(), sha1sum)
+
+        tmpfile = common.MakeTempFile()
+        with open(tmpfile, 'wb') as f:
+          self.file.WriteRangeDataToFd(rs, f)
+        with open(tmpfile, 'rb') as f:
+          self.assertEqual(expected_data, f.read())
+
+  def test_read_all(self):
+    data = "".join(self.file.ReadRangeSet(self.file.care_map))
+    self.assertEqual(self.data, data)
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 466fde1..bb0236b 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -1233,6 +1233,7 @@
     """Uses testdata/payload_signer.sh as the external payload signer."""
     common.OPTIONS.payload_signer = os.path.join(
         self.testdata_dir, 'payload_signer.sh')
+    os.chmod(common.OPTIONS.payload_signer, 0o700)
     common.OPTIONS.payload_signer_args = [
         os.path.join(self.testdata_dir, 'testkey.pk8')]
     payload_signer = PayloadSigner()
diff --git a/tools/releasetools/test_utils.py b/tools/releasetools/test_utils.py
old mode 100644
new mode 100755
index edb3d41..ca127b1
--- a/tools/releasetools/test_utils.py
+++ b/tools/releasetools/test_utils.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 #
 # Copyright (C) 2018 The Android Open Source Project
 #
@@ -40,6 +41,19 @@
 
 def get_search_path():
   """Returns the search path that has 'framework/signapk.jar' under."""
+
+  def signapk_exists(path):
+    signapk_path = os.path.realpath(
+        os.path.join(path, 'framework', 'signapk.jar'))
+    return os.path.exists(signapk_path)
+
+  # Try with ANDROID_BUILD_TOP first.
+  full_path = os.path.realpath(os.path.join(
+      os.environ.get('ANDROID_BUILD_TOP', ''), 'out', 'host', 'linux-x86'))
+  if signapk_exists(full_path):
+    return full_path
+
+  # Otherwise, try relative paths.
   current_dir = os.path.dirname(os.path.realpath(__file__))
   for path in (
       # In relative to 'build/make/tools/releasetools' in the Android source.
@@ -47,9 +61,7 @@
       # Or running the script unpacked from otatools.zip.
       ['..']):
     full_path = os.path.realpath(os.path.join(current_dir, *path))
-    signapk_path = os.path.realpath(
-        os.path.join(full_path, 'framework', 'signapk.jar'))
-    if os.path.exists(signapk_path):
+    if signapk_exists(full_path):
       return full_path
   return None
 
@@ -123,3 +135,10 @@
 
   def tearDown(self):
     common.Cleanup()
+
+
+if __name__ == '__main__':
+  testsuite = unittest.TestLoader().discover(
+      os.path.dirname(os.path.realpath(__file__)))
+  # atest needs a verbosity level of >= 2 to correctly parse the result.
+  unittest.TextTestRunner(verbosity=2).run(testsuite)