Merge "Remove usages of jill.jar, use jack instead"
diff --git a/core/definitions.mk b/core/definitions.mk
index d2f973f..4a6ec02 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -2283,11 +2283,23 @@
 fi
 endef
 
+# Returns the minSdkVersion of the specified APK as a decimal number. If the
+# version is a codename, returns the current platform SDK version (always a
+# decimal number) instead. If the APK does not specify a minSdkVersion, returns
+# 0 to match how the Android platform interprets this situation at runtime.
+#
+define get-package-min-sdk-version-int
+$$(($(AAPT) dump badging $(1) 2>&1 | grep '^sdkVersion' || echo "sdkVersion:'0'") \
+    | cut -d"'" -f2 | \
+    sed -e s/^$(PLATFORM_VERSION_CODENAME)$$/$(PLATFORM_SDK_VERSION)/)
+endef
+
 # Sign a package using the specified key/cert.
 #
 define sign-package
 $(hide) mv $@ $@.unsigned
 $(hide) java -Djava.library.path=$(SIGNAPK_JNI_LIBRARY_PATH) -jar $(SIGNAPK_JAR) \
+    --min-sdk-version $(call get-package-min-sdk-version-int,$@.unsigned) \
     $(PRIVATE_CERTIFICATE) $(PRIVATE_PRIVATE_KEY) \
     $(PRIVATE_ADDITIONAL_CERTIFICATES) $@.unsigned $@.signed
 $(hide) mv $@.signed $@
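
Note on the hunk above: sign-package now derives --min-sdk-version from the
APK itself via get-package-min-sdk-version-int. A minimal Python sketch of
what the shell pipeline computes, assuming "aapt dump badging" prints a line
such as sdkVersion:'23' or sdkVersion:'N' (names are illustrative; unlike the
make version, which falls back to 0 through the "|| echo", this raises if
aapt fails):

    import re
    import subprocess

    def min_sdk_version_int(apk, codename, platform_sdk):
        # Missing sdkVersion -> "0"; the current platform codename maps
        # to PLATFORM_SDK_VERSION, mirroring the sed substitution.
        out = subprocess.check_output(["aapt", "dump", "badging", apk])
        m = re.search(r"^sdkVersion:'([^']*)'", out, re.MULTILINE)
        version = m.group(1) if m else "0"
        return platform_sdk if version == codename else int(version)
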
diff --git a/core/module_arch_supported.mk b/core/module_arch_supported.mk
index 9f05060..62e2643 100644
--- a/core/module_arch_supported.mk
+++ b/core/module_arch_supported.mk
@@ -12,7 +12,7 @@
 ## LOCAL_MODULE_HOST_OS
 ##
 ## Inputs from build system:
-## $(my_prefix)IS_64_BIT
+## $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)IS_64_BIT
 ## LOCAL_2ND_ARCH_VAR_PREFIX
 ##
 ## Outputs:
@@ -25,19 +25,18 @@
 my_module_arch_supported := false
 endif
 
-ifeq ($(LOCAL_2ND_ARCH_VAR_PREFIX),)
-ifeq ($($(my_prefix)IS_64_BIT)|$(my_module_multilib),true|32)
-my_module_arch_supported := false
-else ifeq ($($(my_prefix)IS_64_BIT)|$(my_module_multilib),|64)
+ifeq ($($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)IS_64_BIT)|$(my_module_multilib),true|32)
 my_module_arch_supported := false
 endif
-else # LOCAL_2ND_ARCH_VAR_PREFIX
+ifeq ($($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)IS_64_BIT)|$(my_module_multilib),|64)
+my_module_arch_supported := false
+endif
+
+ifneq ($(LOCAL_2ND_ARCH_VAR_PREFIX),)
 ifeq ($(my_module_multilib),first)
 my_module_arch_supported := false
-else ifeq ($(my_module_multilib),64)
-my_module_arch_supported := false
 endif
-endif # LOCAL_2ND_ARCH_VAR_PREFIX
+endif
 
 ifneq (,$(LOCAL_MODULE_$(my_prefix)ARCH))
 ifeq (,$(filter $($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH),$(LOCAL_MODULE_$(my_prefix)ARCH)))
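
Note on module_arch_supported.mk: the rewritten conditionals consult the
arch-variant-specific $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)IS_64_BIT, so
the 32/64 multilib checks now apply to the primary and secondary arch alike.
An illustrative Python model of the predicate (not build code):

    def arch_supported(is_64_bit, second_arch, multilib):
        # A 32-only module cannot build for a 64-bit arch variant, nor
        # a 64-only module for a 32-bit one.
        if is_64_bit and multilib == "32":
            return False
        if not is_64_bit and multilib == "64":
            return False
        # "first" restricts the module to the primary arch variant.
        if second_arch and multilib == "first":
            return False
        return True
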
diff --git a/core/prebuilt.mk b/core/prebuilt.mk
index ed25f71..caa9909 100644
--- a/core/prebuilt.mk
+++ b/core/prebuilt.mk
@@ -72,6 +72,26 @@
 endif
 LOCAL_HOST_PREFIX :=
 endif
+
+ifdef HOST_CROSS_2ND_ARCH
+my_prefix := HOST_CROSS_
+LOCAL_2ND_ARCH_VAR_PREFIX := $($(my_prefix)2ND_ARCH_VAR_PREFIX)
+LOCAL_HOST_PREFIX := $(my_prefix)
+include $(BUILD_SYSTEM)/module_arch_supported.mk
+ifeq ($(my_module_arch_supported),true)
+# Build the module for the secondary host cross arch.
+OVERRIDE_BUILT_MODULE_PATH :=
+LOCAL_BUILT_MODULE :=
+LOCAL_INSTALLED_MODULE :=
+LOCAL_MODULE_STEM :=
+LOCAL_BUILT_MODULE_STEM :=
+LOCAL_INSTALLED_MODULE_STEM :=
+LOCAL_INTERMEDIATE_TARGETS :=
+include $(BUILD_SYSTEM)/prebuilt_internal.mk
+endif
+LOCAL_HOST_PREFIX :=
+LOCAL_2ND_ARCH_VAR_PREFIX :=
+endif
 endif
 endif
 
diff --git a/core/prebuilt_internal.mk b/core/prebuilt_internal.mk
index fa8c2e5..3578c46 100644
--- a/core/prebuilt_internal.mk
+++ b/core/prebuilt_internal.mk
@@ -216,7 +216,7 @@
 endif
 $(built_module): PRIVATE_EMBEDDED_JNI_LIBS := $(embedded_prebuilt_jni_libs)
 
-$(built_module) : $(my_prebuilt_src_file) | $(ACP) $(ZIPALIGN) $(SIGNAPK_JAR)
+$(built_module) : $(my_prebuilt_src_file) | $(ACP) $(ZIPALIGN) $(SIGNAPK_JAR) $(AAPT)
 	$(transform-prebuilt-to-target)
 	$(uncompress-shared-libs)
 ifneq ($(LOCAL_CERTIFICATE),PRESIGNED)
@@ -255,7 +255,7 @@
 
 $(built_apk_splits) : PRIVATE_PRIVATE_KEY := $(LOCAL_CERTIFICATE).pk8
 $(built_apk_splits) : PRIVATE_CERTIFICATE := $(LOCAL_CERTIFICATE).x509.pem
-$(built_apk_splits) : $(built_module_path)/%.apk : $(my_src_dir)/%.apk | $(ACP)
+$(built_apk_splits) : $(built_module_path)/%.apk : $(my_src_dir)/%.apk | $(ACP) $(AAPT)
 	$(copy-file-to-new-target)
 	$(sign-package)
 
diff --git a/core/soong.mk b/core/soong.mk
index 646c68e..578aac9 100644
--- a/core/soong.mk
+++ b/core/soong.mk
@@ -45,7 +45,11 @@
 	echo '    "DeviceSecondaryAbi": ["$(TARGET_2ND_CPU_ABI)", "$(TARGET_2ND_CPU_ABI2)"],'; \
 	echo ''; \
 	echo '    "HostArch": "$(HOST_ARCH)",'; \
-	echo '    "HostSecondaryArch": "$(HOST_2ND_ARCH)"'; \
+	echo '    "HostSecondaryArch": "$(HOST_2ND_ARCH)",'; \
+	echo ''; \
+	echo '    "CrossHost": "$(HOST_CROSS_OS)",'; \
+	echo '    "CrossHostArch": "$(HOST_CROSS_ARCH)",'; \
+	echo '    "CrossHostSecondaryArch": "$(HOST_CROSS_2ND_ARCH)"'; \
 	echo '}') > $(SOONG_VARIABLES_TMP); \
 	if ! cmp -s $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); then \
 	  mv $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); \
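
Note on soong.mk: with the three extra keys, the generated soong.variables
gains a cross-host section. A hedged sample of the resulting fragment, with
illustrative values for a Windows cross-compile from Linux:

    "HostArch": "x86_64",
    "HostSecondaryArch": "x86",

    "CrossHost": "windows",
    "CrossHostArch": "x86",
    "CrossHostSecondaryArch": "x86_64"
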
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 1d338ee..25d2743 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -16,7 +16,9 @@
 
 from collections import deque, OrderedDict
 from hashlib import sha1
+import array
 import common
+import functools
 import heapq
 import itertools
 import multiprocessing
@@ -24,6 +26,7 @@
 import re
 import subprocess
 import threading
+import time
 import tempfile
 
 from rangelib import RangeSet
@@ -204,6 +207,25 @@
             " to " + str(self.tgt_ranges) + ">")
 
 
+@functools.total_ordering
+class HeapItem(object):
+  def __init__(self, item):
+    self.item = item
+    # Negate the score since python's heap is a min-heap and we want
+    # the maximum score.
+    self.score = -item.score
+  def clear(self):
+    self.item = None
+  def __bool__(self):
+    return self.item is not None
+  # Python 2 tests truth via __nonzero__, not __bool__.
+  __nonzero__ = __bool__
+  def __eq__(self, other):
+    return self.score == other.score
+  def __le__(self, other):
+    return self.score <= other.score
+
+
 # BlockImageDiff works on two image objects.  An image object is
 # anything that provides the following attributes:
 #
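
Note on HeapItem: heapq has no decrease-key, so entries cannot be rescored
in place; instead the stale entry is cleared and a fresh one pushed, and
pops skip cleared entries. A runnable sketch of the pattern, reusing
HeapItem from the hunk above (Transfer is a stand-in for the real transfer
objects):

    import heapq

    class Transfer(object):
        def __init__(self, score):
            self.score = score

    heap = []
    xf = Transfer(10)
    xf.heap_item = HeapItem(xf)       # score stored negated: max pops first
    heapq.heappush(heap, xf.heap_item)

    # Re-score: invalidate the old entry, push a replacement.
    xf.heap_item.clear()
    xf.score += 5
    xf.heap_item = HeapItem(xf)
    heapq.heappush(heap, xf.heap_item)

    while heap:
        top = heapq.heappop(heap)
        if top:                       # cleared entries are falsy; skip
            best = top.item
            break
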
@@ -251,6 +271,7 @@
     self.transfers = []
     self.src_basenames = {}
     self.src_numpatterns = {}
+    self._max_stashed_size = 0
 
     assert version in (1, 2, 3, 4)
 
@@ -268,6 +289,10 @@
     self.AssertPartition(src.care_map, src.file_map.values())
     self.AssertPartition(tgt.care_map, tgt.file_map.values())
 
+  @property
+  def max_stashed_size(self):
+    return self._max_stashed_size
+
   def Compute(self, prefix):
     # When looking for a source file to use as the diff input for a
     # target file, we try:
@@ -538,17 +563,17 @@
         f.write(i)
 
     if self.version >= 2:
-      max_stashed_size = max_stashed_blocks * self.tgt.blocksize
+      self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
       OPTIONS = common.OPTIONS
       if OPTIONS.cache_size is not None:
         max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
         print("max stashed blocks: %d  (%d bytes), "
               "limit: %d bytes (%.2f%%)\n" % (
-              max_stashed_blocks, max_stashed_size, max_allowed,
-              max_stashed_size * 100.0 / max_allowed))
+              max_stashed_blocks, self._max_stashed_size, max_allowed,
+              self._max_stashed_size * 100.0 / max_allowed))
       else:
         print("max stashed blocks: %d  (%d bytes), limit: <unknown>\n" % (
-              max_stashed_blocks, max_stashed_size))
+              max_stashed_blocks, self._max_stashed_size))
 
   def ReviseStashSize(self):
     print("Revising stash size...")
@@ -734,7 +759,7 @@
     # - we write every block we care about exactly once.
 
     # Start with no blocks having been touched yet.
-    touched = RangeSet()
+    touched = array.array("B", "\0" * self.tgt.total_blocks)
 
     # Imagine processing the transfers in order.
     for xf in self.transfers:
@@ -745,14 +770,22 @@
         for _, sr in xf.use_stash:
           x = x.subtract(sr)
 
-      assert not touched.overlaps(x)
-      # Check that the output blocks for this transfer haven't yet been touched.
-      assert not touched.overlaps(xf.tgt_ranges)
-      # Touch all the blocks written by this transfer.
-      touched = touched.union(xf.tgt_ranges)
+      for s, e in x:
+        for i in range(s, e):
+          assert touched[i] == 0
+
+      # Check that the output blocks for this transfer haven't yet
+      # been touched, and touch all the blocks written by this
+      # transfer.
+      for s, e in xf.tgt_ranges:
+        for i in range(s, e):
+          assert touched[i] == 0
+          touched[i] = 1
 
     # Check that we've written every target block.
-    assert touched == self.tgt.care_map
+    for s, e in self.tgt.care_map:
+      for i in range(s, e):
+        assert touched[i] == 1
 
   def ImproveVertexSequence(self):
     print("Improving vertex order...")
@@ -889,6 +922,7 @@
     for xf in self.transfers:
       xf.incoming = xf.goes_after.copy()
       xf.outgoing = xf.goes_before.copy()
+      xf.score = sum(xf.outgoing.values()) - sum(xf.incoming.values())
 
     # We use an OrderedDict instead of just a set so that the output
     # is repeatable; otherwise it would depend on the hash values of
@@ -899,52 +933,67 @@
     s1 = deque()  # the left side of the sequence, built from left to right
     s2 = deque()  # the right side of the sequence, built from right to left
 
-    while G:
+    heap = []
+    for xf in self.transfers:
+      xf.heap_item = HeapItem(xf)
+      heap.append(xf.heap_item)
+    heapq.heapify(heap)
 
+    sinks = set(u for u in G if not u.outgoing)
+    sources = set(u for u in G if not u.incoming)
+
+    def adjust_score(iu, delta):
+      iu.score += delta
+      iu.heap_item.clear()
+      iu.heap_item = HeapItem(iu)
+      heapq.heappush(heap, iu.heap_item)
+
+    while G:
       # Put all sinks at the end of the sequence.
-      while True:
-        sinks = [u for u in G if not u.outgoing]
-        if not sinks:
-          break
+      while sinks:
+        new_sinks = set()
         for u in sinks:
+          if u not in G: continue
           s2.appendleft(u)
           del G[u]
           for iu in u.incoming:
-            del iu.outgoing[u]
+            adjust_score(iu, -iu.outgoing.pop(u))
+            if not iu.outgoing: new_sinks.add(iu)
+        sinks = new_sinks
 
       # Put all the sources at the beginning of the sequence.
-      while True:
-        sources = [u for u in G if not u.incoming]
-        if not sources:
-          break
+      while sources:
+        new_sources = set()
         for u in sources:
+          if u not in G: continue
           s1.append(u)
           del G[u]
           for iu in u.outgoing:
-            del iu.incoming[u]
+            adjust_score(iu, +iu.incoming.pop(u))
+            if not iu.incoming: new_sources.add(iu)
+        sources = new_sources
 
-      if not G:
-        break
+      if not G: break
 
       # Find the "best" vertex to put next.  "Best" is the one that
       # maximizes the net difference in source blocks saved we get by
       # pretending it's a source rather than a sink.
 
-      max_d = None
-      best_u = None
-      for u in G:
-        d = sum(u.outgoing.values()) - sum(u.incoming.values())
-        if best_u is None or d > max_d:
-          max_d = d
-          best_u = u
+      while True:
+        u = heapq.heappop(heap)
+        if u and u.item in G:
+          u = u.item
+          break
 
-      u = best_u
       s1.append(u)
       del G[u]
       for iu in u.outgoing:
-        del iu.incoming[u]
+        adjust_score(iu, +iu.incoming.pop(u))
+        if not iu.incoming: sources.add(iu)
+
       for iu in u.incoming:
-        del iu.outgoing[u]
+        adjust_score(iu, -iu.outgoing.pop(u))
+        if not iu.outgoing: sinks.add(iu)
 
     # Now record the sequence in the 'order' field of each transfer,
     # and by rearranging self.transfers to be in the chosen sequence.
@@ -960,10 +1009,38 @@
 
   def GenerateDigraph(self):
     print("Generating digraph...")
+
+    # Each item of source_ranges will be:
+    #   - None, if that block is not used as a source,
+    #   - a transfer, if one transfer uses it as a source, or
+    #   - a set of transfers.
+    source_ranges = []
+    for b in self.transfers:
+      for s, e in b.src_ranges:
+        if e > len(source_ranges):
+          source_ranges.extend([None] * (e-len(source_ranges)))
+        for i in range(s, e):
+          if source_ranges[i] is None:
+            source_ranges[i] = b
+          else:
+            if not isinstance(source_ranges[i], set):
+              source_ranges[i] = set([source_ranges[i]])
+            source_ranges[i].add(b)
+
     for a in self.transfers:
-      for b in self.transfers:
-        if a is b:
-          continue
+      intersections = set()
+      for s, e in a.tgt_ranges:
+        for i in range(s, e):
+          if i >= len(source_ranges): break
+          b = source_ranges[i]
+          if b is not None:
+            if isinstance(b, set):
+              intersections.update(b)
+            else:
+              intersections.add(b)
+
+      for b in intersections:
+        if a is b: continue
 
         # If the blocks written by A are read by B, then B needs to go before A.
         i = a.tgt_ranges.intersect(b.src_ranges)
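
Note on GenerateDigraph: the source_ranges table replaces the previous
all-pairs scan; candidate edges are found by looking up each written block
in a per-block index of readers. The same idea in self-contained form (a
dict here, where the code above uses a dense list for speed):

    source_index = {}                   # block number -> reading transfers
    reads = {"A": [(0, 4)], "B": [(2, 6)]}
    for name, ranges in reads.items():
        for s, e in ranges:
            for i in range(s, e):
                source_index.setdefault(i, set()).add(name)

    readers = set()
    for s, e in [(3, 5)]:               # blocks written by some transfer
        for i in range(s, e):
            readers |= source_index.get(i, set())
    print(readers)                      # A and B both read what we write
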
@@ -1092,6 +1169,7 @@
     """Assert that all the RangeSets in 'seq' form a partition of the
     'total' RangeSet (ie, they are nonintersecting and their union
     equals 'total')."""
+
     so_far = RangeSet()
     for i in seq:
       assert not so_far.overlaps(i)
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index e6ad18b..e248860 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -28,6 +28,7 @@
 import commands
 import common
 import shutil
+import sparse_img
 import tempfile
 
 OPTIONS = common.OPTIONS
@@ -91,6 +92,16 @@
     return verity_size + fec_size
   return verity_size
 
+def GetSimgSize(image_file):
+  simg = sparse_img.SparseImage(image_file, build_map=False)
+  return simg.blocksize * simg.total_blocks
+
+def ZeroPadSimg(image_file, pad_size):
+  blocks = pad_size // BLOCK_SIZE
+  print("Padding %d blocks (%d bytes)" % (blocks, pad_size))
+  simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
+  simg.AppendFillChunk(0, blocks)
+
 def AdjustPartitionSizeForVerity(partition_size, fec_supported):
   """Modifies the provided partition size to account for the verity metadata.
 
@@ -329,7 +340,7 @@
 
   # Adjust the partition size to make room for the hashes if this is to be
   # verified.
-  if verity_supported and is_verity_partition and fs_spans_partition:
+  if verity_supported and is_verity_partition:
     partition_size = int(prop_dict.get("partition_size"))
     adjusted_size = AdjustPartitionSizeForVerity(partition_size,
                                                  verity_fec_supported)
@@ -440,17 +451,13 @@
   if not fs_spans_partition:
     mount_point = prop_dict.get("mount_point")
     partition_size = int(prop_dict.get("partition_size"))
-    image_size = os.stat(out_file).st_size
+    image_size = GetSimgSize(out_file)
     if image_size > partition_size:
       print("Error: %s image size of %d is larger than partition size of "
             "%d" % (mount_point, image_size, partition_size))
       return False
     if verity_supported and is_verity_partition:
-      if 2 * image_size - AdjustPartitionSizeForVerity(image_size, verity_fec_supported) > partition_size:
-        print "Error: No more room on %s to fit verity data" % mount_point
-        return False
-    prop_dict["original_partition_size"] = prop_dict["partition_size"]
-    prop_dict["partition_size"] = str(image_size)
+      ZeroPadSimg(out_file, partition_size - image_size)
 
   # create the verified image if this is to be verified
   if verity_supported and is_verity_partition:
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 95aeb62..cde49cd 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -30,7 +30,6 @@
 import zipfile
 
 import blockimgdiff
-import rangelib
 
 from hashlib import sha1 as sha1
 
@@ -592,7 +591,46 @@
   return key_passwords
 
 
-def SignFile(input_name, output_name, key, password, whole_file=False):
+def GetMinSdkVersion(apk_name):
+  """Get the minSdkVersion delared in the APK. This can be both a decimal number
+  (API Level) or a codename.
+  """
+
+  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
+  output, err = p.communicate()
+  if err:
+    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
+        % (p.returncode,))
+
+  for line in output.split("\n"):
+    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
+    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
+    if m:
+      return m.group(1)
+  raise ExternalError("No minSdkVersion returned by aapt")
+
+
+def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
+  """Get the minSdkVersion declared in the APK as a number (API Level). If
+  minSdkVersion is set to a codename, it is translated to a number using the
+  provided map.
+  """
+
+  version = GetMinSdkVersion(apk_name)
+  try:
+    return int(version)
+  except ValueError:
+    # Not a decimal number. Codename?
+    if version in codename_to_api_level_map:
+      return codename_to_api_level_map[version]
+    else:
+      raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
+                          % (version, codename_to_api_level_map))
+
+
+def SignFile(input_name, output_name, key, password, min_api_level=None,
+    codename_to_api_level_map=dict(),
+    whole_file=False):
   """Sign the input_name zip/jar/apk, producing output_name.  Use the
   given key and password (the latter may be None if the key does not
   have a password).
@@ -600,6 +638,13 @@
   If whole_file is true, use the "-w" option to SignApk to embed a
   signature that covers the whole file in the archive comment of the
   zip file.
+
+  min_api_level is the API Level (int) of the oldest platform this file may end
+  up on. If not specified for an APK, the API Level is obtained by interpreting
+  the minSdkVersion attribute of the APK's AndroidManifest.xml.
+
+  codename_to_api_level_map is needed to translate a codename that may be
+  encountered as the APK's minSdkVersion.
   """
 
   java_library_path = os.path.join(
@@ -612,6 +657,15 @@
   cmd.extend(OPTIONS.extra_signapk_args)
   if whole_file:
     cmd.append("-w")
+
+  min_sdk_version = min_api_level
+  if min_sdk_version is None:
+    if not whole_file:
+      min_sdk_version = GetMinSdkVersionInt(
+          input_name, codename_to_api_level_map)
+  if min_sdk_version is not None:
+    cmd.extend(["--min-sdk-version", str(min_sdk_version)])
+
   cmd.extend([key + OPTIONS.public_key_suffix,
               key + OPTIONS.private_key_suffix,
               input_name, output_name])
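
Note on SignFile: unless the caller forces min_api_level, the APK's own
minSdkVersion decides the flag. A hedged sketch with illustrative values:

    GetMinSdkVersionInt("app.apk", {"N": 24})
    # -> 23 if the APK declares sdkVersion:'23'
    # -> 24 if it declares the codename sdkVersion:'N'
    # -> raises ExternalError for a codename missing from the map
    # SignFile then appends ["--min-sdk-version", "24"] to the signapk
    # command line built above.
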
@@ -1255,6 +1309,7 @@
     OPTIONS.tempfiles.append(tmpdir)
     self.path = os.path.join(tmpdir, partition)
     b.Compute(self.path)
+    self._required_cache = b.max_stashed_size
 
     if src is None:
       _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
@@ -1262,6 +1317,10 @@
       _, self.device = GetTypeAndDevice("/" + partition,
                                         OPTIONS.source_info_dict)
 
+  @property
+  def required_cache(self):
+    return self._required_cache
+
   def WriteScript(self, script, output_zip, progress=None):
     if not self.src:
       # write the output unconditionally
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index d923cc8..f57360a 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -23,6 +23,7 @@
   def __init__(self, version, info, fstab=None):
     self.script = []
     self.mounts = set()
+    self._required_cache = 0
     self.version = version
     self.info = info
     if fstab is None:
@@ -38,6 +39,11 @@
     x.mounts = self.mounts
     return x
 
+  @property
+  def required_cache(self):
+    """Return the minimum cache size to apply the update."""
+    return self._required_cache
+
   @staticmethod
   def WordWrap(cmd, linelen=80):
     """'cmd' should be a function call with null characters after each
@@ -171,6 +177,7 @@
   def CacheFreeSpaceCheck(self, amount):
     """Check that there's at least 'amount' space that can be made
     available on /cache."""
+    self._required_cache = max(self._required_cache, amount)
     self.script.append(('apply_patch_space(%d) || abort("Not enough free space '
                         'on /cache to apply patches.");') % (amount,))
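
Note on edify_generator.py: required_cache simply keeps the largest space
check ever emitted. Illustrative, with stand-in constructor arguments:

    script = EdifyGenerator(4, {})
    script.CacheFreeSpaceCheck(10 * 1024 * 1024)
    script.CacheFreeSpaceCheck(4 * 1024 * 1024)
    print(script.required_cache)       # 10485760: the max, not the last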
 
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index ed300a7..bea33a3 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -542,6 +542,8 @@
   has_recovery_patch = HasRecoveryPatch(input_zip)
   block_based = OPTIONS.block_based and has_recovery_patch
 
+  metadata["ota-type"] = "BLOCK" if block_based else "FILE"
+
   if not OPTIONS.omit_prereq:
     ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict)
     ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict)
@@ -697,6 +699,8 @@
 endif;
 """ % bcb_dev)
   script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
+
+  metadata["ota-required-cache"] = str(script.required_cache)
   WriteMetadata(metadata, output_zip)
 
 
@@ -773,6 +777,7 @@
                                    OPTIONS.source_info_dict),
       "post-timestamp": GetBuildProp("ro.build.date.utc",
                                      OPTIONS.target_info_dict),
+      "ota-type": "BLOCK",
   }
 
   device_specific = common.DeviceSpecificParams(
@@ -815,7 +820,7 @@
   # Check first block of system partition for remount R/W only if
   # disk type is ext4
   system_partition = OPTIONS.source_info_dict["fstab"]["/system"]
-  check_first_block = system_partition.fs_type=="ext4"
+  check_first_block = system_partition.fs_type == "ext4"
   system_diff = common.BlockDifference("system", system_tgt, system_src,
                                        check_first_block,
                                        version=blockimgdiff_version)
@@ -831,7 +836,7 @@
     # Check first block of vendor partition for remount R/W only if
     # disk type is ext4
     vendor_partition = OPTIONS.source_info_dict["fstab"]["/vendor"]
-    check_first_block = vendor_partition.fs_type=="ext4"
+    check_first_block = vendor_partition.fs_type == "ext4"
     vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
                                          check_first_block,
                                          version=blockimgdiff_version)
@@ -910,6 +915,13 @@
           GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
           GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
 
+  # Check the required cache size (i.e. stashed blocks).
+  size = []
+  if system_diff:
+    size.append(system_diff.required_cache)
+  if vendor_diff:
+    size.append(vendor_diff.required_cache)
+
   if updating_boot:
     boot_type, boot_device = common.GetTypeAndDevice(
         "/boot", OPTIONS.source_info_dict)
@@ -930,6 +942,10 @@
                         (boot_type, boot_device,
                          source_boot.size, source_boot.sha1,
                          target_boot.size, target_boot.sha1))
+      size.append(target_boot.size)
+
+  if size:
+    script.CacheFreeSpaceCheck(max(size))
 
   device_specific.IncrementalOTA_VerifyEnd()
 
@@ -1003,6 +1019,7 @@
 
   script.SetProgress(1)
   script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
+  metadata["ota-required-cache"] = str(script.required_cache)
   WriteMetadata(metadata, output_zip)
 
 
@@ -1076,6 +1093,7 @@
 
   script.SetProgress(1.0)
   script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
+  metadata["ota-required-cache"] = str(script.required_cache)
   WriteMetadata(metadata, output_zip)
 
 
@@ -1119,6 +1137,8 @@
       "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                    OPTIONS.info_dict),
       "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
+      "ota-required-cache": "0",
+      "ota-type": "AB",
   }
 
   if source_file is not None:
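
Note on the metadata keys: with ota-type and ota-required-cache set,
META-INF/com/android/metadata in an A/B package comes out along these lines
(values illustrative; WriteMetadata emits key=value pairs):

    ota-required-cache=0
    ota-type=AB
    post-timestamp=1451606400
    pre-device=bullhead
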
@@ -1374,6 +1394,7 @@
                                    OPTIONS.source_info_dict),
       "post-timestamp": GetBuildProp("ro.build.date.utc",
                                      OPTIONS.target_info_dict),
+      "ota-type": "FILE",
   }
 
   device_specific = common.DeviceSpecificParams(
@@ -1487,6 +1508,13 @@
   if vendor_diff:
     so_far += vendor_diff.EmitVerification(script)
 
+  size = []
+  if system_diff.patch_list:
+    size.append(system_diff.largest_source_size)
+  if vendor_diff:
+    if vendor_diff.patch_list:
+      size.append(vendor_diff.largest_source_size)
+
   if updating_boot:
     d = common.Difference(target_boot, source_boot)
     _, _, d = d.ComputePatch()
@@ -1503,14 +1531,9 @@
                        source_boot.size, source_boot.sha1,
                        target_boot.size, target_boot.sha1))
     so_far += source_boot.size
+    size.append(target_boot.size)
 
-  size = []
-  if system_diff.patch_list:
-    size.append(system_diff.largest_source_size)
-  if vendor_diff:
-    if vendor_diff.patch_list:
-      size.append(vendor_diff.largest_source_size)
-  if size or updating_recovery or updating_boot:
+  if size:
     script.CacheFreeSpaceCheck(max(size))
 
   device_specific.IncrementalOTA_VerifyEnd()
@@ -1723,6 +1746,7 @@
     vendor_diff.EmitExplicitTargetVerification(script)
   script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
 
+  metadata["ota-required-cache"] = str(script.required_cache)
   WriteMetadata(metadata, output_zip)
 
 
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index baf60f5..8941e35 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -127,14 +127,34 @@
     sys.exit(1)
 
 
-def SignApk(data, keyname, pw):
+def SignApk(data, keyname, pw, platform_api_level, codename_to_api_level_map):
   unsigned = tempfile.NamedTemporaryFile()
   unsigned.write(data)
   unsigned.flush()
 
   signed = tempfile.NamedTemporaryFile()
 
-  common.SignFile(unsigned.name, signed.name, keyname, pw)
+  # For pre-N builds, don't upgrade to SHA-256 JAR signatures based on the APK's
+  # minSdkVersion to avoid increasing incremental OTA update sizes. If an APK
+  # didn't change, we don't want its signature to change due to the switch
+  # from SHA-1 to SHA-256.
+  # By default, APK signer chooses SHA-256 signatures if the APK's minSdkVersion
+  # is 18 or higher. For pre-N builds we disable this mechanism by pretending
+  # that the APK's minSdkVersion is 1.
+  # For N+ builds, we let APK signer rely on the APK's minSdkVersion to
+  # determine whether to use SHA-256.
+  min_api_level = None
+  if platform_api_level > 23:
+    # Let APK signer choose whether to use SHA-1 or SHA-256, based on the APK's
+    # minSdkVersion attribute
+    min_api_level = None
+  else:
+    # Force APK signer to use SHA-1
+    min_api_level = 1
+
+  common.SignFile(unsigned.name, signed.name, keyname, pw,
+      min_api_level=min_api_level,
+      codename_to_api_level_map=codename_to_api_level_map)
 
   data = signed.read()
   unsigned.close()
@@ -144,7 +164,8 @@
 
 
 def ProcessTargetFiles(input_tf_zip, output_tf_zip, misc_info,
-                       apk_key_map, key_passwords):
+                       apk_key_map, key_passwords, platform_api_level,
+                       codename_to_api_level_map):
 
   maxsize = max([len(os.path.basename(i.filename))
                  for i in input_tf_zip.infolist()
@@ -200,7 +221,8 @@
       key = apk_key_map[name]
       if key not in common.SPECIAL_CERT_STRINGS:
         print "    signing: %-*s (%s)" % (maxsize, name, key)
-        signed_data = SignApk(data, key, key_passwords[key])
+        signed_data = SignApk(data, key, key_passwords[key], platform_api_level,
+            codename_to_api_level_map)
         common.ZipWriteStr(output_tf_zip, out_info, signed_data)
       else:
         # an APK we're not supposed to sign.
@@ -440,6 +462,55 @@
       OPTIONS.key_map[s] = d
 
 
+def GetApiLevelAndCodename(input_tf_zip):
+  data = input_tf_zip.read("SYSTEM/build.prop")
+  api_level = None
+  codename = None
+  for line in data.split("\n"):
+    line = line.strip()
+    if line and line[0] != '#' and "=" in line:
+      key, value = line.split("=", 1)
+      key = key.strip()
+      if key == "ro.build.version.sdk":
+        api_level = int(value.strip())
+      elif key == "ro.build.version.codename":
+        codename = value.strip()
+
+  if api_level is None:
+    raise ValueError("No ro.build.version.sdk in SYSTEM/build.prop")
+  if codename is None:
+    raise ValueError("No ro.build.version.codename in SYSTEM/build.prop")
+
+  return (api_level, codename)
+
+
+def GetCodenameToApiLevelMap(input_tf_zip):
+  data = input_tf_zip.read("SYSTEM/build.prop")
+  api_level = None
+  codenames = None
+  for line in data.split("\n"):
+    line = line.strip()
+    if line and line[0] != '#' and "=" in line:
+      key, value = line.split("=", 1)
+      key = key.strip()
+      if key == "ro.build.version.sdk":
+        api_level = int(value.strip())
+      elif key == "ro.build.version.all_codenames":
+        codenames = value.strip().split(",")
+
+  if api_level is None:
+    raise ValueError("No ro.build.version.sdk in SYSTEM/build.prop")
+  if codenames is None:
+    raise ValueError("No ro.build.version.all_codenames in SYSTEM/build.prop")
+
+  result = dict()
+  for codename in codenames:
+    codename = codename.strip()
+    if len(codename) > 0:
+      result[codename] = api_level
+  return result
+
+
 def main(argv):
 
   key_mapping_options = []
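
Note on GetCodenameToApiLevelMap: every codename listed in
ro.build.version.all_codenames maps to the platform's current API level.
A worked example with illustrative build.prop content:

    data = ("ro.build.version.sdk=23\n"
            "ro.build.version.all_codenames=N\n")
    # Parsed as above: api_level = 23, codenames = ["N"], so the result
    # is {"N": 23}, and an APK declaring minSdkVersion "N" resolves to
    # API level 23 when signing.
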
@@ -498,8 +571,17 @@
   CheckAllApksSigned(input_zip, apk_key_map)
 
   key_passwords = common.GetKeyPasswords(set(apk_key_map.values()))
+  platform_api_level, platform_codename = GetApiLevelAndCodename(input_zip)
+  codename_to_api_level_map = GetCodenameToApiLevelMap(input_zip)
+  # Android N will be API Level 24, but isn't yet.
+  # TODO: Remove this workaround once Android N is officially API Level 24.
+  if platform_api_level == 23 and platform_codename == "N":
+    platform_api_level = 24
+
   ProcessTargetFiles(input_zip, output_zip, misc_info,
-                     apk_key_map, key_passwords)
+                     apk_key_map, key_passwords,
+                     platform_api_level,
+                     codename_to_api_level_map)
 
   common.ZipClose(input_zip)
   common.ZipClose(output_zip)
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 013044f..4ba7560 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -31,8 +31,9 @@
   the form of a string like "0" or "0 1-5 8".
   """
 
-  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None):
-    self.simg_f = f = open(simg_fn, "rb")
+  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
+               mode="rb", build_map=True):
+    self.simg_f = f = open(simg_fn, mode)
 
     header_bin = f.read(28)
     header = struct.unpack("<I4H4I", header_bin)
@@ -44,7 +45,7 @@
     chunk_hdr_sz = header[4]
     self.blocksize = blk_sz = header[5]
     self.total_blocks = total_blks = header[6]
-    total_chunks = header[7]
+    self.total_chunks = total_chunks = header[7]
 
     if magic != 0xED26FF3A:
       raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
@@ -61,6 +62,9 @@
     print("Total of %u %u-byte output blocks in %u input chunks."
           % (total_blks, blk_sz, total_chunks))
 
+    if not build_map:
+      return
+
     pos = 0   # in blocks
     care_data = []
     self.offset_map = offset_map = []
@@ -126,6 +130,20 @@
     else:
       self.file_map = {"__DATA": self.care_map}
 
+  def AppendFillChunk(self, data, blocks):
+    f = self.simg_f
+
+    # Append a fill chunk
+    f.seek(0, os.SEEK_END)
+    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))
+
+    # Update the sparse header
+    self.total_blocks += blocks
+    self.total_chunks += 1
+
+    f.seek(16, os.SEEK_SET)
+    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))
+
   def ReadRangeSet(self, ranges):
     return [d for d in self._GetRangeData(ranges)]
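
Note on AppendFillChunk: the sparse image format uses a 12-byte
little-endian chunk header, and a fill chunk carries one 32-bit fill value,
hence total_sz of 16. A sketch of the exact bytes written:

    import struct

    blocks, fill = 256, 0
    chunk = struct.pack("<2H3I",
                        0xCAC2,   # CHUNK_TYPE_FILL
                        0,        # reserved
                        blocks,   # chunk size, in output blocks
                        16,       # total_sz: 12-byte header + fill word
                        fill)     # 32-bit value replicated per block
    assert len(chunk) == 16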