Merge "Envsetup: Fix lunch choice with number in zsh"
diff --git a/core/definitions.mk b/core/definitions.mk
index ded969b..51eeadc 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -3234,7 +3234,7 @@
 ##
 ## $(1): path to validate
 define try-validate-path-is-subdir
-$(strip 
+$(strip \
     $(if $(filter /%,$(1)),
         $(1) starts with a slash
     )
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 56d8e29..7128e3a 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -183,62 +183,26 @@
 TARGET_COPY_OUT_DATA := data
 TARGET_COPY_OUT_ASAN := $(TARGET_COPY_OUT_DATA)/asan
 TARGET_COPY_OUT_OEM := oem
-TARGET_COPY_OUT_ODM := odm
-TARGET_COPY_OUT_PRODUCT := product
-TARGET_COPY_OUT_PRODUCT_SERVICES := product_services
 TARGET_COPY_OUT_RAMDISK := ramdisk
 TARGET_COPY_OUT_ROOT := root
 TARGET_COPY_OUT_RECOVERY := recovery
+# The directories used for optional partitions depend on the BoardConfig, so
+# they're defined to placeholder values here and swapped after reading the
+# BoardConfig, to be either the partition dir or a subdir within 'system'.
+_vendor_path_placeholder := ||VENDOR-PATH-PH||
+_product_path_placeholder := ||PRODUCT-PATH-PH||
+_product_services_path_placeholder := ||PRODUCT_SERVICES-PATH-PH||
+_odm_path_placeholder := ||ODM-PATH-PH||
+TARGET_COPY_OUT_VENDOR := $(_vendor_path_placeholder)
+TARGET_COPY_OUT_PRODUCT := $(_product_path_placeholder)
+TARGET_COPY_OUT_PRODUCT_SERVICES := $(_product_services_path_placeholder)
+TARGET_COPY_OUT_ODM := $(_odm_path_placeholder)
 
 # Returns the non-sanitized version of the path provided in $1.
 define get_non_asan_path
 $(patsubst $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/%,$(PRODUCT_OUT)/%,$1)
 endef
 
-###########################################
-# Define TARGET_COPY_OUT_VENDOR to a placeholder, for at this point
-# we don't know if the device wants to build a separate vendor.img
-# or just build vendor stuff into system.img.
-# A device can set up TARGET_COPY_OUT_VENDOR to "vendor" in its
-# BoardConfig.mk.
-# We'll substitute with the real value after loading BoardConfig.mk.
-_vendor_path_placeholder := ||VENDOR-PATH-PH||
-TARGET_COPY_OUT_VENDOR := $(_vendor_path_placeholder)
-###########################################
-
-###########################################
-# Define TARGET_COPY_OUT_PRODUCT to a placeholder, for at this point
-# we don't know if the device wants to build a separate product.img
-# or just build product stuff into system.img.
-# A device can set up TARGET_COPY_OUT_PRODUCT to "product" in its
-# BoardConfig.mk.
-# We'll substitute with the real value after loading BoardConfig.mk.
-_product_path_placeholder := ||PRODUCT-PATH-PH||
-TARGET_COPY_OUT_PRODUCT := $(_product_path_placeholder)
-###########################################
-
-###########################################
-# Define TARGET_COPY_OUT_PRODUCT_SERVICES to a placeholder, for at this point
-# we don't know if the device wants to build a separate product_services.img
-# or just build product stuff into system.img.
-# A device can set up TARGET_COPY_OUT_PRODUCT_SERVICES to "product_services" in its
-# BoardConfig.mk.
-# We'll substitute with the real value after loading BoardConfig.mk.
-_product_services_path_placeholder := ||PRODUCT_SERVICES-PATH-PH||
-TARGET_COPY_OUT_PRODUCT_SERVICES := $(_product_services_path_placeholder)
-###########################################
-
-###########################################
-# Define TARGET_COPY_OUT_ODM to a placeholder, for at this point
-# we don't know if the device wants to build a separate odm.img
-# or just build odm stuff into vendor.img.
-# A device can set up TARGET_COPY_OUT_ODM to "odm" in its
-# BoardConfig.mk.
-# We'll substitute with the real value after loading BoardConfig.mk.
-_odm_path_placeholder := ||ODM-PATH-PH||
-TARGET_COPY_OUT_ODM := $(_odm_path_placeholder)
-###########################################
-
 #################################################################
 # Set up minimal BOOTCLASSPATH list of jars to build/execute
 # java code with dalvikvm/art.
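
Note on the placeholder scheme above: it defers the install location until
BoardConfig.mk has been read. A rough Python sketch of the swap, purely
illustrative (the real substitution lives in the build system's make files,
and the helper name here is hypothetical):

    def resolve_copy_out(current, placeholder, partition_dir, has_partition):
      # BoardConfig may already have overridden the variable; only swap
      # values that are still the placeholder.
      if current != placeholder:
        return current
      # Either the dedicated partition dir, or a subdir within 'system'.
      return partition_dir if has_partition else "system/" + partition_dir

    # No vendor partition -> vendor files land under system/vendor.
    assert resolve_copy_out("||VENDOR-PATH-PH||", "||VENDOR-PATH-PH||",
                            "vendor", has_partition=False) == "system/vendor"
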
diff --git a/target/product/mainline_system.mk b/target/product/mainline_system.mk
index 8335906..8dec2d9 100644
--- a/target/product/mainline_system.mk
+++ b/target/product/mainline_system.mk
@@ -22,6 +22,15 @@
 PRODUCT_PACKAGES += \
     com.android.nfc_extras \
 
+# Applications
+PRODUCT_PACKAGES += \
+    DMService \
+    LiveWallpapersPicker \
+    PartnerBookmarksProvider \
+    RcsService \
+    SafetyRegulatoryInfo \
+    Stk \
+
 # OTA support
 PRODUCT_PACKAGES += \
     update_engine \
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 1bce60e..d7d1bc8 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -308,9 +308,8 @@
   hash_seed = "hash_seed-" + uuid_seed
   image_props["hash_seed"] = str(uuid.uuid5(uuid.NAMESPACE_URL, hash_seed))
 
-  succ = build_image.BuildImage(os.path.join(input_dir, what.upper()),
-                                image_props, output_file.name)
-  assert succ, "build " + what + ".img image failed"
+  build_image.BuildImage(
+      os.path.join(input_dir, what.upper()), image_props, output_file.name)
 
   output_file.Write()
   if block_list:
@@ -361,8 +360,7 @@
   fstab = OPTIONS.info_dict["fstab"]
   if fstab:
     image_props["fs_type"] = fstab["/data"].fs_type
-  succ = build_image.BuildImage(user_dir, image_props, img.name)
-  assert succ, "build userdata.img image failed"
+  build_image.BuildImage(user_dir, image_props, img.name)
 
   common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict)
   img.Write()
@@ -514,8 +512,7 @@
   fstab = OPTIONS.info_dict["fstab"]
   if fstab:
     image_props["fs_type"] = fstab["/cache"].fs_type
-  succ = build_image.BuildImage(user_dir, image_props, img.name)
-  assert succ, "build cache.img image failed"
+  build_image.BuildImage(user_dir, image_props, img.name)
 
   common.CheckSize(img.name, "cache.img", OPTIONS.info_dict)
   img.Write()
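
With BuildImage() now raising build_image.BuildImageError instead of
returning a boolean, the `succ`/assert pattern above disappears and failures
propagate with a descriptive message. A minimal sketch of the resulting call
site, reusing the names from AddUserdata above:

    try:
      build_image.BuildImage(user_dir, image_props, img.name)
    except build_image.BuildImageError:
      # The exception text already names the failure; add context here if
      # needed, then let it propagate to main().
      raise
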
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index c7d93d3..aeb4379 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -32,7 +32,6 @@
 import common
 from rangelib import RangeSet
 
-
 __all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
 
 
@@ -649,6 +648,14 @@
 
     self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges)
 
+    if self.tgt.hashtree_info:
+      out.append("compute_hash_tree {} {} {} {} {}\n".format(
+          self.tgt.hashtree_info.hashtree_range.to_string_raw(),
+          self.tgt.hashtree_info.filesystem_range.to_string_raw(),
+          self.tgt.hashtree_info.hash_algorithm,
+          self.tgt.hashtree_info.salt,
+          self.tgt.hashtree_info.root_hash))
+
     # Zero out extended blocks as a workaround for bug 20881595.
     if self.tgt.extended:
       assert (WriteSplitTransfers(out, "zero", self.tgt.extended) ==
@@ -988,6 +995,12 @@
           assert touched[i] == 0
           touched[i] = 1
 
+    if self.tgt.hashtree_info:
+      for s, e in self.tgt.hashtree_info.hashtree_range:
+        for i in range(s, e):
+          assert touched[i] == 0
+          touched[i] = 1
+
     # Check that we've written every target block.
     for s, e in self.tgt.care_map:
       for i in range(s, e):
@@ -1533,6 +1546,9 @@
         AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
         continue
 
+      elif tgt_fn == "__HASHTREE":
+        continue
+
       elif tgt_fn in self.src.file_map:
         # Look for an exact pathname match in the source.
         AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
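
For reference, a sketch of the transfer-list command emitted by the
compute_hash_tree hunk above, with made-up values; RangeSet.to_string_raw()
renders a range set as "<count>,<start>,<end>,...":

    # Hypothetical layout: filesystem in blocks [0, 242), hashtree in
    # blocks [242, 245).
    line = "compute_hash_tree {} {} {} {} {}\n".format(
        "2,242,245",     # hashtree_range.to_string_raw()
        "2,0,242",       # filesystem_range.to_string_raw()
        "sha256",        # hash_algorithm
        "aee087a5...",   # salt (truncated)
        "0b7c4565...")   # root_hash (truncated)
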
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index c422280..f1594d7 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -45,6 +45,13 @@
 BYTES_IN_MB = 1024 * 1024
 
 
+class BuildImageError(Exception):
+  """An Exception raised during image building."""
+
+  def __init__(self, message):
+    Exception.__init__(self, message)
+
+
 def RunCommand(cmd, verbose=None, env=None):
   """Echo and run the given command.
 
@@ -76,58 +83,55 @@
   cmd = ["fec", "-s", str(partition_size)]
   output, exit_code = RunCommand(cmd, False)
   if exit_code != 0:
-    return False, 0
-  return True, int(output)
+    raise BuildImageError("Failed to GetVerityFECSize:\n{}".format(output))
+  return int(output)
 
 
 def GetVerityTreeSize(partition_size):
   cmd = ["build_verity_tree", "-s", str(partition_size)]
   output, exit_code = RunCommand(cmd, False)
   if exit_code != 0:
-    return False, 0
-  return True, int(output)
+    raise BuildImageError("Failed to GetVerityTreeSize:\n{}".format(output))
+  return int(output)
 
 
 def GetVerityMetadataSize(partition_size):
   cmd = ["build_verity_metadata.py", "size", str(partition_size)]
   output, exit_code = RunCommand(cmd, False)
   if exit_code != 0:
-    return False, 0
-  return True, int(output)
+    raise BuildImageError("Failed to GetVerityMetadataSize:\n{}".format(output))
+  return int(output)
 
 
 def GetVeritySize(partition_size, fec_supported):
-  success, verity_tree_size = GetVerityTreeSize(partition_size)
-  if not success:
-    return 0
-  success, verity_metadata_size = GetVerityMetadataSize(partition_size)
-  if not success:
-    return 0
+  verity_tree_size = GetVerityTreeSize(partition_size)
+  verity_metadata_size = GetVerityMetadataSize(partition_size)
   verity_size = verity_tree_size + verity_metadata_size
   if fec_supported:
-    success, fec_size = GetVerityFECSize(partition_size + verity_size)
-    if not success:
-      return 0
+    fec_size = GetVerityFECSize(partition_size + verity_size)
     return verity_size + fec_size
   return verity_size
 
 
 def GetDiskUsage(path):
-  """Return number of bytes that "path" occupies on host.
+  """Returns the number of bytes that "path" occupies on host.
 
   Args:
     path: The directory or file to calculate size on
+
   Returns:
-    True and the number of bytes if successful,
-    False and 0 otherwise.
+    The number of bytes.
+
+  Raises:
+    BuildImageError: On error.
   """
   env = {"POSIXLY_CORRECT": "1"}
   cmd = ["du", "-s", path]
   output, exit_code = RunCommand(cmd, verbose=False, env=env)
   if exit_code != 0:
-    return False, 0
+    raise BuildImageError("Failed to get disk usage:\n{}".format(output))
   # POSIX du returns number of blocks with block size 512
-  return True, int(output.split()[0]) * 512
+  return int(output.split()[0]) * 512
 
 
 def GetSimgSize(image_file):
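
The helpers above now raise BuildImageError instead of returning (False, 0)
tuples, so callers read straight through. A minimal usage sketch, with a
hypothetical path:

    try:
      size = GetDiskUsage("/path/to/tree")  # returns bytes on success
    except BuildImageError as e:
      print("du failed: {}".format(e))
      raise
    print("tree occupies {} bytes".format(size))
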
@@ -149,20 +153,85 @@
     avbtool: String with path to avbtool.
     footer_type: 'hash' or 'hashtree' for generating footer.
     partition_size: The size of the partition in question.
-    additional_args: Additional arguments to pass to 'avbtool
-      add_hashtree_image'.
+    additional_args: Additional arguments to pass to "avbtool add_hash_footer"
+        or "avbtool add_hashtree_footer".
+
   Returns:
-    The maximum image size or 0 if an error occurred.
+    The maximum image size.
+
+  Raises:
+    BuildImageError: On error or getting invalid image size.
   """
   cmd = [avbtool, "add_%s_footer" % footer_type,
-         "--partition_size", partition_size, "--calc_max_image_size"]
+         "--partition_size", str(partition_size), "--calc_max_image_size"]
   cmd.extend(shlex.split(additional_args))
 
-  (output, exit_code) = RunCommand(cmd)
+  output, exit_code = RunCommand(cmd)
   if exit_code != 0:
-    return 0
-  else:
-    return int(output)
+    raise BuildImageError(
+        "Failed to calculate max image size:\n{}".format(output))
+  image_size = int(output)
+  if image_size <= 0:
+    raise BuildImageError(
+        "Invalid max image size: {}".format(output))
+  return image_size
+
+
+def AVBCalcMinPartitionSize(image_size, size_calculator):
+  """Calculates min partition size for a given image size.
+
+  Args:
+    image_size: The size of the image in question.
+    size_calculator: The function to calculate max image size
+        for a given partition size.
+
+  Returns:
+    The minimum partition size required to accommodate the image size.
+  """
+  # Use image size as partition size to approximate final partition size.
+  image_ratio = size_calculator(image_size) / float(image_size)
+
+  # Prepare a binary search for the optimal partition size.
+  lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - BLOCK_SIZE
+
+  # Ensure lo is small enough: max_image_size should be <= image_size.
+  delta = BLOCK_SIZE
+  max_image_size = size_calculator(lo)
+  while max_image_size > image_size:
+    image_ratio = max_image_size / float(lo)
+    lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - delta
+    delta *= 2
+    max_image_size = size_calculator(lo)
+
+  hi = lo + BLOCK_SIZE
+
+  # Ensure hi is large enough: max_image_size should be >= image_size.
+  delta = BLOCK_SIZE
+  max_image_size = size_calculator(hi)
+  while max_image_size < image_size:
+    image_ratio = max_image_size / float(hi)
+    hi = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE + delta
+    delta *= 2
+    max_image_size = size_calculator(hi)
+
+  partition_size = hi
+
+  # Start to binary search.
+  while lo < hi:
+    mid = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
+    max_image_size = size_calculator(mid)
+    if max_image_size >= image_size:  # if mid can accommodate image_size
+      if mid < partition_size:  # if a smaller partition size is found
+        partition_size = mid
+      hi = mid
+    else:
+      lo = mid + BLOCK_SIZE
+
+  if OPTIONS.verbose:
+    print("AVBCalcMinPartitionSize({}): partition_size: {}.".format(
+        image_size, partition_size))
+
+  return partition_size
 
 
 def AVBAddFooter(image_path, avbtool, footer_type, partition_size,
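
Usage sketch for the binary search above, mirroring the unit tests added
later in this change. The size_calculator maps a candidate partition size to
the maximum image size that fits once the AVB footer is added; the result is
always a multiple of BLOCK_SIZE:

    # Linear footer model with 5% overhead (illustrative).
    image_size = 100 * 1024 * 1024
    min_size = AVBCalcMinPartitionSize(
        image_size, lambda part_size: int(part_size * 0.95))
    assert min_size % BLOCK_SIZE == 0
    assert int(min_size * 0.95) >= image_size                # image fits
    assert int((min_size - BLOCK_SIZE) * 0.95) < image_size  # and is minimal
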
@@ -179,11 +248,11 @@
     key_path: Path to key to use or None.
     algorithm: Name of algorithm to use or None.
     salt: The salt to use (a hexadecimal string) or None.
-    additional_args: Additional arguments to pass to 'avbtool
-        add_hashtree_image'.
+    additional_args: Additional arguments to pass to "avbtool add_hash_footer"
+        or "avbtool add_hashtree_footer".
 
-  Returns:
-    True if the operation succeeded.
+  Raises:
+    BuildImageError: On error.
   """
   cmd = [avbtool, "add_%s_footer" % footer_type,
          "--partition_size", partition_size,
@@ -199,9 +268,8 @@
 
   output, exit_code = RunCommand(cmd)
   if exit_code != 0:
-    print("Failed to add AVB footer! Error: %s" % output)
-    return False
-  return True
+    raise BuildImageError(
+        "Failed to add AVB footer:\n{}".format(output))
 
 
 def AdjustPartitionSizeForVerity(partition_size, fec_supported):
@@ -258,9 +326,8 @@
          verity_path, verity_fec_path]
   output, exit_code = RunCommand(cmd)
   if exit_code != 0:
-    print("Could not build FEC data! Error: %s" % output)
-    return False
-  return True
+    raise BuildImageError(
+        "Failed to build FEC data:\n{}".format(output))
 
 
 def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
@@ -268,12 +335,11 @@
          verity_image_path]
   output, exit_code = RunCommand(cmd)
   if exit_code != 0:
-    print("Could not build verity tree! Error: %s" % output)
-    return False
+    raise BuildImageError(
+        "Failed to build verity tree:\n{}".format(output))
   root, salt = output.split()
   prop_dict["verity_root_hash"] = root
   prop_dict["verity_salt"] = salt
-  return True
 
 
 def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
@@ -287,9 +353,8 @@
     cmd.append("--verity_disable")
   output, exit_code = RunCommand(cmd)
   if exit_code != 0:
-    print("Could not build verity metadata! Error: %s" % output)
-    return False
-  return True
+    raise BuildImageError(
+        "Failed to build verity metadata:\n{}".format(output))
 
 
 def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
@@ -298,49 +363,45 @@
   Args:
     sparse_image_path: the path to the (sparse) image
     unsparse_image_path: the path to the (unsparse) image
-  Returns:
-    True on success, False on failure.
+
+  Raises:
+    BuildImageError: On error.
   """
   cmd = ["append2simg", sparse_image_path, unsparse_image_path]
   output, exit_code = RunCommand(cmd)
   if exit_code != 0:
-    print("%s: %s" % (error_message, output))
-    return False
-  return True
+    raise BuildImageError("{}:\n{}".format(error_message, output))
 
 
 def Append(target, file_to_append, error_message):
-  """Appends file_to_append to target."""
+  """Appends file_to_append to target.
+
+  Raises:
+    BuildImageError: On error.
+  """
   try:
     with open(target, "a") as out_file, open(file_to_append, "r") as input_file:
       for line in input_file:
         out_file.write(line)
   except IOError:
-    print(error_message)
-    return False
-  return True
+    raise BuildImageError(error_message)
 
 
 def BuildVerifiedImage(data_image_path, verity_image_path,
                        verity_metadata_path, verity_fec_path,
                        padding_size, fec_supported):
-  if not Append(verity_image_path, verity_metadata_path,
-                "Could not append verity metadata!"):
-    return False
+  Append(
+      verity_image_path, verity_metadata_path,
+      "Could not append verity metadata!")
 
   if fec_supported:
-    # build FEC for the entire partition, including metadata
-    if not BuildVerityFEC(data_image_path, verity_image_path,
-                          verity_fec_path, padding_size):
-      return False
+    # Build FEC for the entire partition, including metadata.
+    BuildVerityFEC(
+        data_image_path, verity_image_path, verity_fec_path, padding_size)
+    Append(verity_image_path, verity_fec_path, "Could not append FEC!")
 
-    if not Append(verity_image_path, verity_fec_path, "Could not append FEC!"):
-      return False
-
-  if not Append2Simg(data_image_path, verity_image_path,
-                     "Could not append verity data!"):
-    return False
-  return True
+  Append2Simg(
+      data_image_path, verity_image_path, "Could not append verity data!")
 
 
 def UnsparseImage(sparse_image_path, replace=True):
@@ -351,15 +412,15 @@
     if replace:
       os.unlink(unsparse_image_path)
     else:
-      return True, unsparse_image_path
+      return unsparse_image_path
   inflate_command = ["simg2img", sparse_image_path, unsparse_image_path]
-  (inflate_output, exit_code) = RunCommand(inflate_command)
+  inflate_output, exit_code = RunCommand(inflate_command)
   if exit_code != 0:
-    print("Error: '%s' failed with exit code %d:\n%s" % (
-        inflate_command, exit_code, inflate_output))
     os.remove(unsparse_image_path)
-    return False, None
-  return True, unsparse_image_path
+    raise BuildImageError(
+        "Error: '{}' failed with exit code {}:\n{}".format(
+            inflate_command, exit_code, inflate_output))
+  return unsparse_image_path
 
 
 def MakeVerityEnabledImage(out_file, fec_supported, prop_dict):
@@ -369,8 +430,10 @@
     out_file: the location to write the verifiable image at
     prop_dict: a dictionary of properties required for image creation and
                verification
-  Returns:
-    True on success, False otherwise.
+
+  Raises:
+    AssertionError: On invalid partition sizes.
+    BuildImageError: On other errors.
   """
   # get properties
   image_size = int(prop_dict["image_size"])
@@ -382,50 +445,44 @@
     signer_path = prop_dict["verity_signer_cmd"]
   signer_args = OPTIONS.verity_signer_args
 
-  # make a tempdir
   tempdir_name = common.MakeTempDir(suffix="_verity_images")
 
-  # get partial image paths
+  # Get partial image paths.
   verity_image_path = os.path.join(tempdir_name, "verity.img")
   verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
   verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
 
-  # build the verity tree and get the root hash and salt
-  if not BuildVerityTree(out_file, verity_image_path, prop_dict):
-    return False
+  # Build the verity tree and get the root hash and salt.
+  BuildVerityTree(out_file, verity_image_path, prop_dict)
 
-  # build the metadata blocks
+  # Build the metadata blocks.
   root_hash = prop_dict["verity_root_hash"]
   salt = prop_dict["verity_salt"]
   verity_disable = "verity_disable" in prop_dict
-  if not BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
-                             block_dev, signer_path, signer_key, signer_args,
-                             verity_disable):
-    return False
+  BuildVerityMetadata(
+      image_size, verity_metadata_path, root_hash, salt, block_dev, signer_path,
+      signer_key, signer_args, verity_disable)
 
-  # build the full verified image
+  # Build the full verified image.
   partition_size = int(prop_dict["partition_size"])
   verity_size = int(prop_dict["verity_size"])
 
   padding_size = partition_size - image_size - verity_size
   assert padding_size >= 0
 
-  if not BuildVerifiedImage(out_file,
-                            verity_image_path,
-                            verity_metadata_path,
-                            verity_fec_path,
-                            padding_size,
-                            fec_supported):
-    return False
-
-  return True
+  BuildVerifiedImage(
+      out_file, verity_image_path, verity_metadata_path, verity_fec_path,
+      padding_size, fec_supported)
 
 
 def ConvertBlockMapToBaseFs(block_map_file):
   base_fs_file = common.MakeTempFile(prefix="script_gen_", suffix=".base_fs")
   convert_command = ["blk_alloc_to_base_fs", block_map_file, base_fs_file]
-  (_, exit_code) = RunCommand(convert_command)
-  return base_fs_file if exit_code == 0 else None
+  output, exit_code = RunCommand(convert_command)
+  if exit_code != 0:
+    raise BuildImageError(
+        "Failed to call blk_alloc_to_base_fs:\n{}".format(output))
+  return base_fs_file
 
 
 def SetUpInDirAndFsConfig(origin_in, prop_dict):
@@ -489,11 +546,9 @@
     ext4fs_output: The output string from mke2fs command.
     prop_dict: The property dict.
 
-  Returns:
-    The check result.
-
   Raises:
     AssertionError: On invalid input.
+    BuildImageError: On check failure.
   """
   assert ext4fs_output is not None
   assert prop_dict.get('fs_type', '').startswith('ext4')
@@ -511,12 +566,11 @@
   adjusted_blocks = total_blocks - headroom_blocks
   if used_blocks > adjusted_blocks:
     mount_point = prop_dict["mount_point"]
-    print("Error: Not enough room on %s (total: %d blocks, used: %d blocks, "
-          "headroom: %d blocks, available: %d blocks)" % (
-              mount_point, total_blocks, used_blocks, headroom_blocks,
-              adjusted_blocks))
-    return False
-  return True
+    raise BuildImageError(
+        "Error: Not enough room on {} (total: {} blocks, used: {} blocks, "
+        "headroom: {} blocks, available: {} blocks)".format(
+            mount_point, total_blocks, used_blocks, headroom_blocks,
+            adjusted_blocks))
 
 
 def BuildImage(in_dir, prop_dict, out_file, target_out=None):
@@ -532,8 +586,8 @@
         under system/core/libcutils) reads device specific FS config files from
         there.
 
-  Returns:
-    True iff the image is built successfully.
+  Raises:
+    BuildImageError: On build image failures.
   """
   in_dir, fs_config = SetUpInDirAndFsConfig(in_dir, prop_dict)
 
@@ -549,17 +603,33 @@
   verity_supported = prop_dict.get("verity") == "true"
   verity_fec_supported = prop_dict.get("verity_fec") == "true"
 
+  avb_footer_type = None
+  if prop_dict.get("avb_hash_enable") == "true":
+    avb_footer_type = "hash"
+  elif prop_dict.get("avb_hashtree_enable") == "true":
+    avb_footer_type = "hashtree"
+
+  if avb_footer_type:
+    avbtool = prop_dict.get("avb_avbtool")
+    avb_signing_args = prop_dict.get(
+        "avb_add_" + avb_footer_type + "_footer_args")
+
   if (prop_dict.get("use_dynamic_partition_size") == "true" and
       "partition_size" not in prop_dict):
-    # if partition_size is not defined, use output of `du' + reserved_size
-    success, size = GetDiskUsage(in_dir)
-    if not success:
-      return False
+    # If partition_size is not defined, use output of `du' + reserved_size.
+    size = GetDiskUsage(in_dir)
     if OPTIONS.verbose:
       print("The tree size of %s is %d MB." % (in_dir, size // BYTES_IN_MB))
     size += int(prop_dict.get("partition_reserved_size", 0))
     # Round this up to a multiple of 4K so that avbtool works
     size = common.RoundUpTo4K(size)
+    # Adjust partition_size to add more space for AVB footer, to prevent
+    # it from consuming partition_reserved_size.
+    if avb_footer_type:
+      size = AVBCalcMinPartitionSize(
+          size,
+          lambda x: AVBCalcMaxImageSize(
+              avbtool, avb_footer_type, x, avb_signing_args))
     prop_dict["partition_size"] = str(size)
     if OPTIONS.verbose:
       print("Allocating %d MB for %s." % (size // BYTES_IN_MB, out_file))
@@ -571,28 +641,15 @@
     partition_size = int(prop_dict.get("partition_size"))
     image_size, verity_size = AdjustPartitionSizeForVerity(
         partition_size, verity_fec_supported)
-    if not image_size:
-      return False
     prop_dict["image_size"] = str(image_size)
     prop_dict["verity_size"] = str(verity_size)
 
-  avb_footer_type = ''
-  if prop_dict.get("avb_hash_enable") == "true":
-    avb_footer_type = 'hash'
-  elif prop_dict.get("avb_hashtree_enable") == "true":
-    avb_footer_type = 'hashtree'
-
   # Adjust the image size for AVB hash footer or AVB hashtree footer.
   if avb_footer_type:
-    avbtool = prop_dict["avb_avbtool"]
     partition_size = prop_dict["partition_size"]
     # avb_add_hash_footer_args or avb_add_hashtree_footer_args.
-    additional_args = prop_dict["avb_add_" + avb_footer_type + "_footer_args"]
-    max_image_size = AVBCalcMaxImageSize(avbtool, avb_footer_type,
-                                         partition_size, additional_args)
-    if max_image_size <= 0:
-      print("AVBCalcMaxImageSize is <= 0: %d" % max_image_size)
-      return False
+    max_image_size = AVBCalcMaxImageSize(
+        avbtool, avb_footer_type, partition_size, avb_signing_args)
     prop_dict["image_size"] = str(max_image_size)
 
   if fs_type.startswith("ext"):
@@ -615,8 +672,6 @@
       build_command.extend(["-B", prop_dict["block_list"]])
     if "base_fs_file" in prop_dict:
       base_fs_file = ConvertBlockMapToBaseFs(prop_dict["base_fs_file"])
-      if base_fs_file is None:
-        return False
       build_command.extend(["-d", base_fs_file])
     build_command.extend(["-L", prop_dict["mount_point"]])
     if "extfs_inode_count" in prop_dict:
@@ -674,16 +729,17 @@
       build_command.extend(["-T", str(prop_dict["timestamp"])])
     build_command.extend(["-L", prop_dict["mount_point"]])
   else:
-    print("Error: unknown filesystem type '%s'" % (fs_type))
-    return False
+    raise BuildImageError(
+        "Error: unknown filesystem type: {}".format(fs_type))
 
-  (mkfs_output, exit_code) = RunCommand(build_command)
+  mkfs_output, exit_code = RunCommand(build_command)
   if exit_code != 0:
-    print("Error: '%s' failed with exit code %d:\n%s" % (
-        build_command, exit_code, mkfs_output))
-    success, du = GetDiskUsage(in_dir)
-    du_str = ("%d bytes (%d MB)" % (du, du // BYTES_IN_MB)
-             ) if success else "unknown"
+    try:
+      du = GetDiskUsage(in_dir)
+      du_str = "{} bytes ({} MB)".format(du, du // BYTES_IN_MB)
+    except BuildImageError as e:
+      print(e, file=sys.stderr)
+      du_str = "unknown"
     print(
         "Out of space? The tree size of {} is {}, with reserved space of {} "
         "bytes ({} MB).".format(
@@ -697,64 +753,57 @@
             int(prop_dict["image_size"]) // BYTES_IN_MB,
             int(prop_dict["partition_size"]),
             int(prop_dict["partition_size"]) // BYTES_IN_MB))
-    return False
+
+    raise BuildImageError(
+        "Error: '{}' failed with exit code {}:\n{}".format(
+            build_command, exit_code, mkfs_output))
 
   # Check if there's enough headroom space available for ext4 image.
   if "partition_headroom" in prop_dict and fs_type.startswith("ext4"):
-    if not CheckHeadroom(mkfs_output, prop_dict):
-      return False
+    CheckHeadroom(mkfs_output, prop_dict)
 
   if not fs_spans_partition:
     mount_point = prop_dict.get("mount_point")
     image_size = int(prop_dict["image_size"])
     sparse_image_size = GetSimgSize(out_file)
     if sparse_image_size > image_size:
-      print("Error: %s image size of %d is larger than partition size of "
-            "%d" % (mount_point, sparse_image_size, image_size))
-      return False
+      raise BuildImageError(
+          "Error: {} image size of {} is larger than partition size of "
+          "{}".format(mount_point, sparse_image_size, image_size))
     if verity_supported and is_verity_partition:
       ZeroPadSimg(out_file, image_size - sparse_image_size)
 
   # Create the verified image if this is to be verified.
   if verity_supported and is_verity_partition:
-    if not MakeVerityEnabledImage(out_file, verity_fec_supported, prop_dict):
-      return False
+    MakeVerityEnabledImage(out_file, verity_fec_supported, prop_dict)
 
   # Add AVB HASH or HASHTREE footer (metadata).
   if avb_footer_type:
-    avbtool = prop_dict["avb_avbtool"]
     partition_size = prop_dict["partition_size"]
     partition_name = prop_dict["partition_name"]
     # key_path and algorithm are only available when chain partition is used.
     key_path = prop_dict.get("avb_key_path")
     algorithm = prop_dict.get("avb_algorithm")
     salt = prop_dict.get("avb_salt")
-    # avb_add_hash_footer_args or avb_add_hashtree_footer_args
-    additional_args = prop_dict["avb_add_" + avb_footer_type + "_footer_args"]
-    if not AVBAddFooter(out_file, avbtool, avb_footer_type,
-                        partition_size, partition_name, key_path,
-                        algorithm, salt, additional_args):
-      return False
+    AVBAddFooter(
+        out_file, avbtool, avb_footer_type, partition_size, partition_name,
+        key_path, algorithm, salt, avb_signing_args)
 
   if run_e2fsck and prop_dict.get("skip_fsck") != "true":
-    success, unsparse_image = UnsparseImage(out_file, replace=False)
-    if not success:
-      return False
+    unsparse_image = UnsparseImage(out_file, replace=False)
 
     # Run e2fsck on the inflated image file
     e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
     # TODO(b/112062612): work around e2fsck failure with SANITIZE_HOST=address
     env4e2fsck = {"ASAN_OPTIONS": "detect_odr_violation=0"}
-    (e2fsck_output, exit_code) = RunCommand(e2fsck_command, env=env4e2fsck)
+    e2fsck_output, exit_code = RunCommand(e2fsck_command, env=env4e2fsck)
 
     os.remove(unsparse_image)
 
     if exit_code != 0:
-      print("Error: '%s' failed with exit code %d:\n%s" % (
-          e2fsck_command, exit_code, e2fsck_output))
-      return False
-
-  return True
+      raise BuildImageError(
+          "Error: '{}' failed with exit code {}:\n{}".format(
+              e2fsck_command, exit_code, e2fsck_output))
 
 
 def ImagePropFromGlobalDict(glob_dict, mount_point):
@@ -1045,10 +1094,12 @@
 
     image_properties = ImagePropFromGlobalDict(glob_dict, mount_point)
 
-  if not BuildImage(in_dir, image_properties, out_file, target_out):
-    print("error: failed to build %s from %s" % (out_file, in_dir),
+  try:
+    BuildImage(in_dir, image_properties, out_file, target_out)
+  except Exception:
+    print("Error: Failed to build {} from {}".format(out_file, in_dir),
           file=sys.stderr)
-    sys.exit(1)
+    raise
 
   if prop_file_out:
     glob_dict_out = GlobalDictFromImageProp(image_properties, mount_point)
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 23dadf1..4e2346c 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -701,7 +701,8 @@
   return tmp
 
 
-def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
+def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
+                   hashtree_info_generator=None):
   """Returns a SparseImage object suitable for passing to BlockImageDiff.
 
   This function loads the specified sparse image from the given path, and
@@ -714,7 +715,9 @@
     tmpdir: The directory that contains the prebuilt image and block map file.
     input_zip: The target-files ZIP archive.
     allow_shared_blocks: Whether having shared blocks is allowed.
-
+    hashtree_info_generator: If present, generates the hashtree_info for this
+        sparse image.
+
   Returns:
     A SparseImage object, with file_map info loaded.
   """
@@ -732,8 +734,9 @@
   # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
   clobbered_blocks = "0"
 
-  image = sparse_img.SparseImage(path, mappath, clobbered_blocks,
-                                 allow_shared_blocks=allow_shared_blocks)
+  image = sparse_img.SparseImage(
+      path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
+      hashtree_info_generator=hashtree_info_generator)
 
   # block.map may contain less blocks, because mke2fs may skip allocating blocks
   # if they contain all zeros. We can't reconstruct such a file from its block
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index e75adf5..755eda9 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -176,6 +176,7 @@
 
 import common
 import edify_generator
+import verity_utils
 
 if sys.hexversion < 0x02070000:
   print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -1411,8 +1412,12 @@
                          target_info.get('ext4_share_dup_blocks') == "true")
   system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip,
                                      allow_shared_blocks)
+
+  hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
+      "system", 4096, target_info)
   system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip,
-                                     allow_shared_blocks)
+                                     allow_shared_blocks,
+                                     hashtree_info_generator)
 
   blockimgdiff_version = max(
       int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
@@ -1439,8 +1444,11 @@
       raise RuntimeError("can't generate incremental that adds /vendor")
     vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip,
                                        allow_shared_blocks)
-    vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip,
-                                       allow_shared_blocks)
+    hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
+        "vendor", 4096, target_info)
+    vendor_tgt = common.GetSparseImage(
+        "vendor", OPTIONS.target_tmp, target_zip, allow_shared_blocks,
+        hashtree_info_generator)
 
     # Check first block of vendor partition for remount R/W only if
     # disk type is ext4
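
End-to-end, the wiring added in this file (names taken from the hunk above):
a generator is created per target partition and threaded through
common.GetSparseImage() into sparse_img.SparseImage(), which invokes it once
the block map is built:

    gen = verity_utils.CreateHashtreeInfoGenerator("vendor", 4096, target_info)
    vendor_tgt = common.GetSparseImage(
        "vendor", OPTIONS.target_tmp, target_zip, allow_shared_blocks, gen)
    # vendor_tgt.hashtree_info, if set, later drives the compute_hash_tree
    # command emitted by BlockImageDiff.
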
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 083da7a..ca53ae1 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -33,7 +33,8 @@
   """
 
   def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
-               mode="rb", build_map=True, allow_shared_blocks=False):
+               mode="rb", build_map=True, allow_shared_blocks=False,
+               hashtree_info_generator=None):
     self.simg_f = f = open(simg_fn, mode)
 
     header_bin = f.read(28)
@@ -64,6 +65,8 @@
           % (total_blks, blk_sz, total_chunks))
 
     if not build_map:
+      assert not hashtree_info_generator, \
+        "Cannot generate the hashtree info without building the offset map."
       return
 
     pos = 0   # in blocks
@@ -102,8 +105,18 @@
         if data_sz != 0:
           raise ValueError("Don't care chunk input size is non-zero (%u)" %
                            (data_sz))
-        else:
-          pos += chunk_sz
+        # Fills the don't care data ranges with zeros.
+        # TODO(xunchang) pass the care_map to hashtree info generator.
+        if hashtree_info_generator:
+          fill_data = '\x00' * 4
+          # In order to compute the verity hashtree on device, we need to
+          # write zeros explicitly to the don't-care ranges, because these
+          # ranges may contain non-zero data from the previous build.
+          care_data.append(pos)
+          care_data.append(pos + chunk_sz)
+          offset_map.append((pos, chunk_sz, None, fill_data))
+
+        pos += chunk_sz
 
       elif chunk_type == 0xCAC4:
         raise ValueError("CRC32 chunks are not supported")
@@ -128,6 +141,10 @@
     extended = extended.intersect(all_blocks).subtract(self.care_map)
     self.extended = extended
 
+    self.hashtree_info = None
+    if hashtree_info_generator:
+      self.hashtree_info = hashtree_info_generator.Generate(self)
+
     if file_map_fn:
       self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
                             allow_shared_blocks)
@@ -246,6 +263,8 @@
         remaining = remaining.subtract(ranges)
 
     remaining = remaining.subtract(clobbered_blocks)
+    if self.hashtree_info:
+      remaining = remaining.subtract(self.hashtree_info.hashtree_range)
 
     # For all the remaining blocks in the care_map (ie, those that
     # aren't part of the data for any file nor part of the clobbered_blocks),
@@ -308,6 +327,8 @@
         out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
     if clobbered_blocks:
       out["__COPY"] = clobbered_blocks
+    if self.hashtree_info:
+      out["__HASHTREE"] = self.hashtree_info.hashtree_range
 
   def ResetFileMap(self):
     """Throw away the file map and treat the entire image as
diff --git a/tools/releasetools/test_build_image.py b/tools/releasetools/test_build_image.py
index 40a7c85..94c31ee 100644
--- a/tools/releasetools/test_build_image.py
+++ b/tools/releasetools/test_build_image.py
@@ -15,11 +15,15 @@
 #
 
 import filecmp
+import math
 import os.path
+import random
 import unittest
 
 import common
-from build_image import CheckHeadroom, RunCommand, SetUpInDirAndFsConfig
+from build_image import (
+    AVBCalcMinPartitionSize, BLOCK_SIZE, BuildImageError, CheckHeadroom,
+    RunCommand, SetUpInDirAndFsConfig)
 
 
 class BuildImageTest(unittest.TestCase):
@@ -28,6 +32,13 @@
   EXT4FS_OUTPUT = (
       "Created filesystem with 2777/129024 inodes and 515099/516099 blocks")
 
+  def setUp(self):
+    # Test AVBCalcMinPartitionSize() with image sizes from 200 MB to 2 GB:
+    #   -  51200 blocks = 200 * 1024 * 1024 / 4096
+    #   - 524288 blocks = 2 * 1024 * 1024 * 1024 / 4096
+    self._image_sizes = [BLOCK_SIZE * random.randint(51200, 524288) + offset
+                         for offset in range(BLOCK_SIZE)]
+
   def tearDown(self):
     common.Cleanup()
 
@@ -38,7 +49,7 @@
         'partition_headroom' : '4096000',
         'mount_point' : 'system',
     }
-    self.assertTrue(CheckHeadroom(self.EXT4FS_OUTPUT, prop_dict))
+    CheckHeadroom(self.EXT4FS_OUTPUT, prop_dict)
 
   def test_CheckHeadroom_InsufficientHeadroom(self):
     # Required headroom: 1001 blocks.
@@ -47,7 +58,8 @@
         'partition_headroom' : '4100096',
         'mount_point' : 'system',
     }
-    self.assertFalse(CheckHeadroom(self.EXT4FS_OUTPUT, prop_dict))
+    self.assertRaises(
+        BuildImageError, CheckHeadroom, self.EXT4FS_OUTPUT, prop_dict)
 
   def test_CheckHeadroom_WrongFsType(self):
     prop_dict = {
@@ -87,14 +99,14 @@
         'partition_headroom' : '40960',
         'mount_point' : 'system',
     }
-    self.assertTrue(CheckHeadroom(ext4fs_output, prop_dict))
+    CheckHeadroom(ext4fs_output, prop_dict)
 
     prop_dict = {
         'fs_type' : 'ext4',
         'partition_headroom' : '413696',
         'mount_point' : 'system',
     }
-    self.assertFalse(CheckHeadroom(ext4fs_output, prop_dict))
+    self.assertRaises(BuildImageError, CheckHeadroom, ext4fs_output, prop_dict)
 
   def test_SetUpInDirAndFsConfig_SystemRootImageTrue_NonSystem(self):
     prop_dict = {
@@ -176,3 +188,51 @@
     self.assertIn('fs-config-system\n', fs_config_data)
     self.assertIn('fs-config-root\n', fs_config_data)
     self.assertEqual('/', prop_dict['mount_point'])
+
+  def test_AVBCalcMinPartitionSize_LinearFooterSize(self):
+    """Tests with footer size which is linear to partition size."""
+    for image_size in self._image_sizes:
+      for ratio in 0.95, 0.56, 0.22:
+        expected_size = common.RoundUpTo4K(int(math.ceil(image_size / ratio)))
+        self.assertEqual(
+            expected_size,
+            AVBCalcMinPartitionSize(image_size, lambda x: int(x * ratio)))
+
+  def test_AVBCalcMinPartitionSize_SlowerGrowthFooterSize(self):
+    """Tests with footer size which grows slower than partition size."""
+
+    def _SizeCalculator(partition_size):
+      """Footer size is the power of 0.95 of partition size."""
+      # Minus footer size to return max image size.
+      return partition_size - int(math.pow(partition_size, 0.95))
+
+    for image_size in self._image_sizes:
+      min_partition_size = AVBCalcMinPartitionSize(image_size, _SizeCalculator)
+      # Checks min_partition_size can accommodate image_size.
+      self.assertGreaterEqual(
+          _SizeCalculator(min_partition_size),
+          image_size)
+      # Checks min_partition_size (round to BLOCK_SIZE) is the minimum.
+      self.assertLess(
+          _SizeCalculator(min_partition_size - BLOCK_SIZE),
+          image_size)
+
+  def test_AVBCalcMinPartitionSize_FasterGrowthFooterSize(self):
+    """Tests with footer size which grows faster than partition size."""
+
+    def _SizeCalculator(partition_size):
+      """Max image size is the power of 0.95 of partition size."""
+      # Max image size grows less than partition size, which means
+      # footer size grows faster than partition size.
+      return int(math.pow(partition_size, 0.95))
+
+    for image_size in self._image_sizes:
+      min_partition_size = AVBCalcMinPartitionSize(image_size, _SizeCalculator)
+      # Checks min_partition_size can accommodate image_size.
+      self.assertGreaterEqual(
+          _SizeCalculator(min_partition_size),
+          image_size)
+      # Checks min_partition_size (round to BLOCK_SIZE) is the minimum.
+      self.assertLess(
+          _SizeCalculator(min_partition_size - BLOCK_SIZE),
+          image_size)
diff --git a/tools/releasetools/test_validate_target_files.py b/tools/releasetools/test_validate_target_files.py
index c7dbffc..0aaf069 100644
--- a/tools/releasetools/test_validate_target_files.py
+++ b/tools/releasetools/test_validate_target_files.py
@@ -139,8 +139,7 @@
         'verity_signer_cmd' : 'verity_signer',
         'verity_size' : str(verity_size),
     }
-    self.assertTrue(
-        build_image.MakeVerityEnabledImage(output_file, verity_fec, prop_dict))
+    build_image.MakeVerityEnabledImage(output_file, verity_fec, prop_dict)
 
   def test_ValidateVerifiedBootImages_systemImage(self):
     input_tmp = common.MakeTempDir()
diff --git a/tools/releasetools/test_verity_utils.py b/tools/releasetools/test_verity_utils.py
new file mode 100644
index 0000000..580612f
--- /dev/null
+++ b/tools/releasetools/test_verity_utils.py
@@ -0,0 +1,168 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Unittests for verity_utils.py."""
+
+from __future__ import print_function
+
+import os
+import os.path
+import unittest
+
+import build_image
+import common
+import sparse_img
+import test_utils
+import verity_utils
+from rangelib import RangeSet
+
+
+class VerityUtilsTest(unittest.TestCase):
+  def setUp(self):
+    self.testdata_dir = test_utils.get_testdata_dir()
+
+    self.partition_size = 1024 * 1024
+    self.prop_dict = {
+        'verity': 'true',
+        'verity_fec': 'true',
+        'system_verity_block_device': '/dev/block/system',
+        'system_size': self.partition_size
+    }
+
+    self.hash_algorithm = "sha256"
+    self.fixed_salt = \
+        "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
+    self.expected_root_hash = \
+        "0b7c4565e87b1026e11fbab91c0bc29e185c847a5b44d40e6e86e461e8adf80d"
+
+  def tearDown(self):
+    common.Cleanup()
+
+  def _create_simg(self, raw_data):
+    output_file = common.MakeTempFile()
+    raw_image = common.MakeTempFile()
+    with open(raw_image, 'wb') as f:
+      f.write(raw_data)
+
+    cmd = ["img2simg", raw_image, output_file, '4096']
+    p = common.Run(cmd)
+    p.communicate()
+    self.assertEqual(0, p.returncode)
+
+    return output_file
+
+  def _generate_image(self):
+    partition_size = 1024 * 1024
+    adjusted_size, verity_size = build_image.AdjustPartitionSizeForVerity(
+        partition_size, True)
+
+    raw_image = ""
+    for i in range(adjusted_size):
+      raw_image += str(i % 10)
+
+    output_file = self._create_simg(raw_image)
+
+    # Append the verity metadata.
+    prop_dict = {
+        'partition_size': str(partition_size),
+        'image_size': str(adjusted_size),
+        'verity_block_device': '/dev/block/system',
+        'verity_key': os.path.join(self.testdata_dir, 'testkey'),
+        'verity_signer_cmd': 'verity_signer',
+        'verity_size': str(verity_size),
+    }
+    build_image.MakeVerityEnabledImage(output_file, True, prop_dict)
+
+    return output_file
+
+  def test_VerifiedBootVersion1HashtreeInfoGenerator_create(self):
+    image_file = sparse_img.SparseImage(self._generate_image())
+
+    generator = verity_utils.CreateHashtreeInfoGenerator(
+        'system', image_file, self.prop_dict)
+    self.assertEqual(
+        verity_utils.VerifiedBootVersion1HashtreeInfoGenerator, type(generator))
+    self.assertEqual(self.partition_size, generator.partition_size)
+    self.assertTrue(generator.fec_supported)
+
+  def test_VerifiedBootVersion1HashtreeInfoGenerator_decomposeImage(self):
+    image_file = sparse_img.SparseImage(self._generate_image())
+
+    generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
+        self.partition_size, 4096, True)
+    generator.DecomposeSparseImage(image_file)
+    self.assertEqual(991232, generator.filesystem_size)
+    self.assertEqual(12288, generator.hashtree_size)
+    self.assertEqual(32768, generator.metadata_size)
+
+  def test_VerifiedBootVersion1HashtreeInfoGenerator_parseHashtreeMetadata(
+      self):
+    image_file = sparse_img.SparseImage(self._generate_image())
+    generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
+        self.partition_size, 4096, True)
+    generator.DecomposeSparseImage(image_file)
+
+    generator._ParseHashtreeMetadata()
+
+    self.assertEqual(
+        self.hash_algorithm, generator.hashtree_info.hash_algorithm)
+    self.assertEqual(self.fixed_salt, generator.hashtree_info.salt)
+    self.assertEqual(self.expected_root_hash, generator.hashtree_info.root_hash)
+
+  def test_VerifiedBootVersion1HashtreeInfoGenerator_validateHashtree_smoke(
+      self):
+    generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
+        self.partition_size, 4096, True)
+    generator.image = sparse_img.SparseImage(self._generate_image())
+
+    generator.hashtree_info = info = verity_utils.HashtreeInfo()
+    info.filesystem_range = RangeSet(data=[0, 991232 / 4096])
+    info.hashtree_range = RangeSet(
+        data=[991232 / 4096, (991232 + 12288) / 4096])
+    info.hash_algorithm = self.hash_algorithm
+    info.salt = self.fixed_salt
+    info.root_hash = self.expected_root_hash
+
+    self.assertTrue(generator.ValidateHashtree())
+
+  def test_VerifiedBootVersion1HashtreeInfoGenerator_validateHashtree_failure(
+      self):
+    generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
+        self.partition_size, 4096, True)
+    generator.image = sparse_img.SparseImage(self._generate_image())
+
+    generator.hashtree_info = info = verity_utils.HashtreeInfo()
+    info.filesystem_range = RangeSet(data=[0, 991232 / 4096])
+    info.hashtree_range = RangeSet(
+        data=[991232 / 4096, (991232 + 12288) / 4096])
+    info.hash_algorithm = self.hash_algorithm
+    info.salt = self.fixed_salt
+    info.root_hash = "a" + self.expected_root_hash[1:]
+
+    self.assertFalse(generator.ValidateHashtree())
+
+  def test_VerifiedBootVersion1HashtreeInfoGenerator_generate(self):
+    image_file = sparse_img.SparseImage(self._generate_image())
+    generator = verity_utils.CreateHashtreeInfoGenerator(
+        'system', 4096, self.prop_dict)
+    info = generator.Generate(image_file)
+
+    self.assertEqual(RangeSet(data=[0, 991232 / 4096]), info.filesystem_range)
+    self.assertEqual(RangeSet(data=[991232 / 4096, (991232 + 12288) / 4096]),
+                     info.hashtree_range)
+    self.assertEqual(self.hash_algorithm, info.hash_algorithm)
+    self.assertEqual(self.fixed_salt, info.salt)
+    self.assertEqual(self.expected_root_hash, info.root_hash)
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
new file mode 100644
index 0000000..0e605b1
--- /dev/null
+++ b/tools/releasetools/verity_utils.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import struct
+
+import common
+from build_image import (AdjustPartitionSizeForVerity, GetVerityTreeSize,
+                         GetVerityMetadataSize, BuildVerityTree)
+from rangelib import RangeSet
+
+
+class HashtreeInfoGenerationError(Exception):
+  """An Exception raised during hashtree info generation."""
+
+  def __init__(self, message):
+    Exception.__init__(self, message)
+
+
+class HashtreeInfo(object):
+  def __init__(self):
+    self.hashtree_range = None
+    self.filesystem_range = None
+    self.hash_algorithm = None
+    self.salt = None
+    self.root_hash = None
+
+
+def CreateHashtreeInfoGenerator(partition_name, block_size, info_dict):
+  generator = None
+  if (info_dict.get("verity") == "true" and
+      info_dict.get("{}_verity_block_device".format(partition_name))):
+    partition_size = info_dict["{}_size".format(partition_name)]
+    fec_supported = info_dict.get("verity_fec") == "true"
+    generator = VerifiedBootVersion1HashtreeInfoGenerator(
+        partition_size, block_size, fec_supported)
+
+  return generator
+
+
+class HashtreeInfoGenerator(object):
+  def Generate(self, image):
+    raise NotImplementedError
+
+  def DecomposeSparseImage(self, image):
+    raise NotImplementedError
+
+  def ValidateHashtree(self):
+    raise NotImplementedError
+
+
+class VerifiedBootVersion2HashtreeInfoGenerator(HashtreeInfoGenerator):
+  pass
+
+
+class VerifiedBootVersion1HashtreeInfoGenerator(HashtreeInfoGenerator):
+  """A class that parses the metadata of hashtree for a given partition."""
+
+  def __init__(self, partition_size, block_size, fec_supported):
+    """Initialize VerityTreeInfo with the sparse image and input property.
+
+    Arguments:
+      partition_size: The whole size in bytes of a partition, including the
+        filesystem size, padding size, and verity size.
+      block_size: Expected size in bytes of each block for the sparse image.
+      fec_supported: True if the verity section contains fec data.
+    """
+
+    self.block_size = block_size
+    self.partition_size = partition_size
+    self.fec_supported = fec_supported
+
+    self.image = None
+    self.filesystem_size = None
+    self.hashtree_size = None
+    self.metadata_size = None
+
+    self.hashtree_info = HashtreeInfo()
+
+  def DecomposeSparseImage(self, image):
+    """Calculate the verity size based on the size of the input image.
+
+    Since we already know the structure of a verity enabled image to be:
+    [filesystem, verity_hashtree, verity_metadata, fec_data]. We can then
+    calculate the size and offset of each section.
+    """
+
+    self.image = image
+    assert self.block_size == image.blocksize
+    assert self.partition_size == image.total_blocks * self.block_size, \
+        "partition size {} doesn't match with the calculated image size." \
+        " total_blocks: {}".format(self.partition_size, image.total_blocks)
+
+    adjusted_size, _ = AdjustPartitionSizeForVerity(
+        self.partition_size, self.fec_supported)
+    assert adjusted_size % self.block_size == 0
+
+    verity_tree_size = GetVerityTreeSize(adjusted_size)
+    assert verity_tree_size % self.block_size == 0
+
+    metadata_size = GetVerityMetadataSize(adjusted_size)
+    assert metadata_size % self.block_size == 0
+
+    self.filesystem_size = adjusted_size
+    self.hashtree_size = verity_tree_size
+    self.metadata_size = metadata_size
+
+    self.hashtree_info.filesystem_range = RangeSet(
+        data=[0, adjusted_size / self.block_size])
+    self.hashtree_info.hashtree_range = RangeSet(
+        data=[adjusted_size / self.block_size,
+              (adjusted_size + verity_tree_size) / self.block_size])
+
+  def _ParseHashtreeMetadata(self):
+    """Parses the hash_algorithm, root_hash, salt from the metadata block."""
+
+    metadata_start = self.filesystem_size + self.hashtree_size
+    metadata_range = RangeSet(
+        data=[metadata_start / self.block_size,
+              (metadata_start + self.metadata_size) / self.block_size])
+    meta_data = ''.join(self.image.ReadRangeSet(metadata_range))
+
+    # More info about the metadata structure available in:
+    # system/extras/verity/build_verity_metadata.py
+    META_HEADER_SIZE = 268
+    header_bin = meta_data[0:META_HEADER_SIZE]
+    header = struct.unpack("II256sI", header_bin)
+
+    # header: magic_number, version, signature, table_len
+    assert header[0] == 0xb001b001, header[0]
+    table_len = header[3]
+    verity_table = meta_data[META_HEADER_SIZE: META_HEADER_SIZE + table_len]
+    table_entries = verity_table.rstrip().split()
+
+    # Expected verity table format: "1 block_device block_device block_size
+    # block_size data_blocks data_blocks hash_algorithm root_hash salt"
+    assert len(table_entries) == 10, "Unexpected verity table size {}".format(
+        len(table_entries))
+    assert (int(table_entries[3]) == self.block_size and
+            int(table_entries[4]) == self.block_size)
+    assert (int(table_entries[5]) * self.block_size == self.filesystem_size and
+            int(table_entries[6]) * self.block_size == self.filesystem_size)
+
+    self.hashtree_info.hash_algorithm = table_entries[7]
+    self.hashtree_info.root_hash = table_entries[8]
+    self.hashtree_info.salt = table_entries[9]
+
+  def ValidateHashtree(self):
+    """Checks that we can reconstruct the verity hash tree."""
+
+    # Writes the filesystem section to a temp file, then calls the
+    # build_verity_tree executable to construct the hash tree.
+    adjusted_partition = common.MakeTempFile(prefix="adjusted_partition")
+    with open(adjusted_partition, "wb") as fd:
+      self.image.WriteRangeDataToFd(self.hashtree_info.filesystem_range, fd)
+
+    generated_verity_tree = common.MakeTempFile(prefix="verity")
+    prop_dict = {}
+    BuildVerityTree(adjusted_partition, generated_verity_tree, prop_dict)
+
+    assert prop_dict["verity_salt"] == self.hashtree_info.salt
+    if prop_dict["verity_root_hash"] != self.hashtree_info.root_hash:
+      print("Calculated verty root hash {} doesn't match the one in metadata"
+            " {}".format(prop_dict["verity_root_hash"],
+                         self.hashtree_info.root_hash))
+      return False
+
+    # Reads the generated hash tree and checks if it has the exact same bytes
+    # as the one in the sparse image.
+    with open(generated_verity_tree, "rb") as fd:
+      return fd.read() == ''.join(self.image.ReadRangeSet(
+          self.hashtree_info.hashtree_range))
+
+  def Generate(self, image):
+    """Parses and validates the hashtree info in a sparse image.
+
+    Returns:
+      hashtree_info: The information needed to reconstruct the hashtree.
+    Raises:
+      HashtreeInfoGenerationError: If we fail to generate the exact bytes of
+          the hashtree.
+    """
+
+    self.DecomposeSparseImage(image)
+    self._ParseHashtreeMetadata()
+
+    if not self.ValidateHashtree():
+      raise HashtreeInfoGenerationError("Failed to reconstruct the verity tree")
+
+    return self.hashtree_info
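
Finally, a round-trip sketch of the metadata layout _ParseHashtreeMetadata()
expects: a 268-byte header packed as "II256sI" (magic 0xb001b001, version,
256-byte signature, table length), followed by the verity table. The contents
below are made up:

    import struct

    table = b"1 /dev/blk /dev/blk 4096 4096 242 242 sha256 <root_hash> <salt>"
    header = struct.pack("II256sI", 0xb001b001, 0, b"\x00" * 256, len(table))
    assert len(header) == 268  # META_HEADER_SIZE
    magic, _version, _sig, table_len = struct.unpack("II256sI", header)
    assert magic == 0xb001b001 and table_len == len(table)
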