Merge "Add missing dependency on zip2zip"
diff --git a/core/Makefile b/core/Makefile
index bb4fe0e..4eb04a6 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -4057,7 +4057,8 @@
 $(INSTALLED_SUPERIMAGE_TARGET): extracted_input_target_files := $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE))
 $(INSTALLED_SUPERIMAGE_TARGET): $(LPMAKE) $(BUILT_TARGET_FILES_PACKAGE) $(BUILD_SUPER_IMAGE)
 	$(call pretty,"Target super fs image: $@")
-	$(BUILD_SUPER_IMAGE) -v $(extracted_input_target_files) $@
+	PATH=$(dir $(LPMAKE)):$$PATH \
+	    $(BUILD_SUPER_IMAGE) -v $(extracted_input_target_files) $@
 endif
 
 $(call dist-for-goals,dist_files,$(INSTALLED_SUPERIMAGE_TARGET))
@@ -4070,9 +4071,10 @@
 	rm -rf $(intermediates)/misc_info.txt
 	$(call dump-dynamic-partitions-info,$(intermediates)/misc_info.txt)
 ifeq ($(AB_OTA_UPDATER),true)
-	$(hide) echo "ab_update=true" >> $(intermediates)/misc_info.txt
+	echo "ab_update=true" >> $(intermediates)/misc_info.txt
 endif
-	$(BUILD_SUPER_IMAGE) -v $(intermediates)/misc_info.txt $@
+	PATH=$(dir $(LPMAKE)):$$PATH \
+	    $(BUILD_SUPER_IMAGE) -v $(intermediates)/misc_info.txt $@
 
 $(call dist-for-goals,dist_files,$(INSTALLED_SUPERIMAGE_EMPTY_TARGET))
 
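Note: build_super_image resolves `lpmake` by PATH lookup rather than by absolute path, which is why both rules above prepend $(dir $(LPMAKE)). A minimal Python sketch of the equivalent invocation (paths are illustrative, not the real build layout):

    import os
    import subprocess

    def run_build_super_image(build_super_image, lpmake, input_arg, output):
        # Mirror `PATH=$(dir $(LPMAKE)):$$PATH` from the make rules: put the
        # directory containing lpmake first so the PATH lookup finds it.
        env = dict(os.environ)
        env["PATH"] = os.path.dirname(lpmake) + os.pathsep + env["PATH"]
        subprocess.check_call([build_super_image, "-v", input_arg, output],
                              env=env)
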
diff --git a/core/binary.mk b/core/binary.mk
index 427f689..c3878f9 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -1571,3 +1571,6 @@
+$(notice_target): | $(installed_static_library_notice_file_targets)
+$(LOCAL_INSTALLED_MODULE): | $(notice_target)
+
 # Default is -fno-rtti.
 ifeq ($(strip $(LOCAL_RTTI_FLAG)),)
 LOCAL_RTTI_FLAG := -fno-rtti
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index a0f523b..0fce502 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -248,6 +248,7 @@
 LOCAL_SANITIZE:=
 LOCAL_SANITIZE_DIAG:=
 LOCAL_SANITIZE_RECOVER:=
+LOCAL_SANITIZE_NO_RECOVER:=
 LOCAL_SANITIZE_BLACKLIST :=
 LOCAL_SDK_LIBRARIES :=
 LOCAL_SDK_RES_VERSION:=
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index 7b4015e..51b859e 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -401,6 +401,11 @@
   my_cflags += -fsanitize-recover=$(recover_arg)
 endif
 
+ifneq ($(strip $(LOCAL_SANITIZE_NO_RECOVER)),)
+  no_recover_arg := $(subst $(space),$(comma),$(LOCAL_SANITIZE_NO_RECOVER)),
+  my_cflags += -fno-sanitize-recover=$(no_recover_arg)
+endif
+
 ifneq ($(my_sanitize_diag),)
   # TODO(vishwath): Add diagnostic support for static executables once
   # we switch to clang-4393122 (which adds the static ubsan runtime
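The `$(subst $(space),$(comma),...)` idiom converts the space-separated LOCAL_SANITIZE_NO_RECOVER list into the comma-separated form clang expects; in Python terms it is just a join (sanitizer names below are illustrative):

    def no_recover_cflag(sanitizers):
        # ["shift", "unreachable"] -> "-fno-sanitize-recover=shift,unreachable"
        return "-fno-sanitize-recover=" + ",".join(sanitizers)

    assert no_recover_cflag(["shift", "unreachable"]) == \
        "-fno-sanitize-recover=shift,unreachable"

The trailing comma the assignment appends (mirroring the recover_arg line above it) appears to be harmless: clang's comma-joined option parsing skips empty entries.
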
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index d1fb4e8..097f84a 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -253,7 +253,7 @@
     LOCAL_POST_INSTALL_CMD += &&
   endif
 
-  LOCAL_POST_INSTALL_CMD += unzip -qo -d $(PRODUCT_OUT) $(my_dexpreopt_zip)
+  LOCAL_POST_INSTALL_CMD += for i in $$(zipinfo -1 $(my_dexpreopt_zip)); do mkdir -p $(PRODUCT_OUT)/$$(dirname $$i); done && unzip -qo -d $(PRODUCT_OUT) $(my_dexpreopt_zip)
   $(LOCAL_INSTALLED_MODULE): PRIVATE_POST_INSTALL_CMD := $(LOCAL_POST_INSTALL_CMD)
   $(LOCAL_INSTALLED_MODULE): $(my_dexpreopt_zip)
 
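The reworked post-install command pre-creates every destination directory under $(PRODUCT_OUT) before unzipping. A rough Python equivalent of the shell loop, using zipfile in place of zipinfo/unzip:

    import os
    import zipfile

    def extract_with_dirs(zip_path, product_out):
        # Make the parent directory of every entry first, then extract --
        # the same shape as the zipinfo/mkdir loop followed by `unzip -qo`.
        with zipfile.ZipFile(zip_path) as zf:
            for name in zf.namelist():
                dest_dir = os.path.join(product_out, os.path.dirname(name))
                if dest_dir and not os.path.isdir(dest_dir):
                    os.makedirs(dest_dir)
            zf.extractall(product_out)
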
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index c76537b..0fd40db 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -252,7 +252,7 @@
     #  It must be of the form "YYYY-MM-DD" on production devices.
     #  It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
     #  If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
-      PLATFORM_SECURITY_PATCH := 2018-09-05
+      PLATFORM_SECURITY_PATCH := 2018-12-05
 endif
 .KATI_READONLY := PLATFORM_SECURITY_PATCH
 
diff --git a/target/product/product_launched_with_k.mk b/target/product/product_launched_with_k.mk
new file mode 100644
index 0000000..87faa12
--- /dev/null
+++ b/target/product/product_launched_with_k.mk
@@ -0,0 +1,2 @@
+#PRODUCT_SHIPPING_API_LEVEL indicates the first API level that the device was commercially launched on.
+PRODUCT_SHIPPING_API_LEVEL := 19
diff --git a/target/product/product_launched_with_l.mk b/target/product/product_launched_with_l.mk
index 6e782f7..4e79749 100644
--- a/target/product/product_launched_with_l.mk
+++ b/target/product/product_launched_with_l.mk
@@ -1,3 +1,2 @@
 #PRODUCT_SHIPPING_API_LEVEL indicates the first api level, device has been commercially launched on.
 PRODUCT_SHIPPING_API_LEVEL := 21
-
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index db484a0..0a8def8 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -1294,6 +1294,28 @@
 
         print "%s::%s:" % (logon, uid)
 
+@generator('print')
+class PrintGen(BaseGenerator):
+    """Prints just the constants and values, separated by spaces, in an easy to
+    parse format for use by other scripts.
+
+    Each line is just the identifier and the value, separated by a space.
+    """
+
+    def add_opts(self, opt_group):
+        opt_group.add_argument(
+            'aid-header', help='An android_filesystem_config.h file.')
+
+    def __call__(self, args):
+
+        hdr_parser = AIDHeaderParser(args['aid-header'])
+        aids = hdr_parser.aids
+
+        aids.sort(key=lambda item: int(item.normalized_value))
+
+        for aid in aids:
+            print '%s %s' % (aid.identifier, aid.normalized_value)
+
 
 def main():
     """Main entry point for execution."""
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 2d20e23..b5e01d3 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -26,7 +26,8 @@
 import re
 import sys
 import threading
-from collections import deque, OrderedDict
+import zlib
+from collections import deque, namedtuple, OrderedDict
 from hashlib import sha1
 
 import common
@@ -36,8 +37,12 @@
 
 logger = logging.getLogger(__name__)
 
+# The tuple contains the style and bytes of a bsdiff|imgdiff patch.
+PatchInfo = namedtuple("PatchInfo", ["imgdiff", "content"])
+
 
 def compute_patch(srcfile, tgtfile, imgdiff=False):
+  """Calls bsdiff|imgdiff to compute the patch data, returns a PatchInfo."""
   patchfile = common.MakeTempFile(prefix='patch-')
 
   cmd = ['imgdiff', '-z'] if imgdiff else ['bsdiff']
@@ -52,7 +57,7 @@
     raise ValueError(output)
 
   with open(patchfile, 'rb') as f:
-    return f.read()
+    return PatchInfo(imgdiff, f.read())
 
 
 class Image(object):
@@ -203,17 +208,17 @@
     self.id = len(by_id)
     by_id.append(self)
 
-    self._patch = None
+    self._patch_info = None
 
   @property
-  def patch(self):
-    return self._patch
+  def patch_info(self):
+    return self._patch_info
 
-  @patch.setter
-  def patch(self, patch):
-    if patch:
+  @patch_info.setter
+  def patch_info(self, info):
+    if info:
       assert self.style == "diff"
-    self._patch = patch
+    self._patch_info = info
 
   def NetStashChange(self):
     return (sum(sr.size() for (_, sr) in self.stash_before) -
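With this rename, the patch bytes always travel with the flag recording which tool produced them, so later stages read xf.patch_info.imgdiff instead of re-deriving the diff style. A small illustration with synthetic data:

    from collections import namedtuple

    PatchInfo = namedtuple("PatchInfo", ["imgdiff", "content"])

    info = PatchInfo(imgdiff=True, content=b"synthetic-patch!")
    assert info.imgdiff             # produced by imgdiff, not bsdiff
    assert len(info.content) == 16  # patch_len is len(patch_info.content)
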
@@ -224,7 +229,7 @@
     self.use_stash = []
     self.style = "new"
     self.src_ranges = RangeSet()
-    self.patch = None
+    self.patch_info = None
 
   def __str__(self):
     return (str(self.id) + ": <" + str(self.src_ranges) + " " + self.style +
@@ -462,16 +467,7 @@
     self.AbbreviateSourceNames()
     self.FindTransfers()
 
-    # Find the ordering dependencies among transfers (this is O(n^2)
-    # in the number of transfers).
-    self.GenerateDigraph()
-    # Find a sequence of transfers that satisfies as many ordering
-    # dependencies as possible (heuristically).
-    self.FindVertexSequence()
-    # Fix up the ordering dependencies that the sequence didn't
-    # satisfy.
-    self.ReverseBackwardEdges()
-    self.ImproveVertexSequence()
+    self.FindSequenceForTransfers()
 
     # Ensure the runtime stash size is under the limit.
     if common.OPTIONS.cache_size is not None:
@@ -829,7 +825,7 @@
             # These are identical; we don't need to generate a patch,
             # just issue copy commands on the device.
             xf.style = "move"
-            xf.patch = None
+            xf.patch_info = None
             tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
             if xf.src_ranges != xf.tgt_ranges:
               logger.info(
@@ -839,11 +835,10 @@
                       xf.tgt_name + " (from " + xf.src_name + ")"),
                   str(xf.tgt_ranges), str(xf.src_ranges))
           else:
-            if xf.patch:
-              # We have already generated the patch with imgdiff, while
-              # splitting large APKs (i.e. in FindTransfers()).
-              assert not self.disable_imgdiff
-              imgdiff = True
+            if xf.patch_info:
+              # We have already generated the patch (e.g. when splitting large
+              # APKs or reducing the stash size).
+              imgdiff = xf.patch_info.imgdiff
             else:
               imgdiff = self.CanUseImgdiff(
                   xf.tgt_name, xf.tgt_ranges, xf.src_ranges)
@@ -854,85 +849,16 @@
         else:
           assert False, "unknown style " + xf.style
 
-    if diff_queue:
-      if self.threads > 1:
-        logger.info("Computing patches (using %d threads)...", self.threads)
-      else:
-        logger.info("Computing patches...")
-
-      diff_total = len(diff_queue)
-      patches = [None] * diff_total
-      error_messages = []
-
-      # Using multiprocessing doesn't give additional benefits, due to the
-      # pattern of the code. The diffing work is done by subprocess.call, which
-      # already runs in a separate process (not affected much by the GIL -
-      # Global Interpreter Lock). Using multiprocess also requires either a)
-      # writing the diff input files in the main process before forking, or b)
-      # reopening the image file (SparseImage) in the worker processes. Doing
-      # neither of them further improves the performance.
-      lock = threading.Lock()
-      def diff_worker():
-        while True:
-          with lock:
-            if not diff_queue:
-              return
-            xf_index, imgdiff, patch_index = diff_queue.pop()
-            xf = self.transfers[xf_index]
-
-          patch = xf.patch
-          if not patch:
-            src_ranges = xf.src_ranges
-            tgt_ranges = xf.tgt_ranges
-
-            src_file = common.MakeTempFile(prefix="src-")
-            with open(src_file, "wb") as fd:
-              self.src.WriteRangeDataToFd(src_ranges, fd)
-
-            tgt_file = common.MakeTempFile(prefix="tgt-")
-            with open(tgt_file, "wb") as fd:
-              self.tgt.WriteRangeDataToFd(tgt_ranges, fd)
-
-            message = []
-            try:
-              patch = compute_patch(src_file, tgt_file, imgdiff)
-            except ValueError as e:
-              message.append(
-                  "Failed to generate %s for %s: tgt=%s, src=%s:\n%s" % (
-                      "imgdiff" if imgdiff else "bsdiff",
-                      xf.tgt_name if xf.tgt_name == xf.src_name else
-                      xf.tgt_name + " (from " + xf.src_name + ")",
-                      xf.tgt_ranges, xf.src_ranges, e.message))
-            if message:
-              with lock:
-                error_messages.extend(message)
-
-          with lock:
-            patches[patch_index] = (xf_index, patch)
-
-      threads = [threading.Thread(target=diff_worker)
-                 for _ in range(self.threads)]
-      for th in threads:
-        th.start()
-      while threads:
-        threads.pop().join()
-
-      if error_messages:
-        logger.error('ERROR:')
-        logger.error('\n'.join(error_messages))
-        logger.error('\n\n\n')
-        sys.exit(1)
-    else:
-      patches = []
+    patches = self.ComputePatchesForInputList(diff_queue, False)
 
     offset = 0
     with open(prefix + ".patch.dat", "wb") as patch_fd:
-      for index, patch in patches:
+      for index, patch_info, _ in patches:
         xf = self.transfers[index]
-        xf.patch_len = len(patch)
+        xf.patch_len = len(patch_info.content)
         xf.patch_start = offset
         offset += xf.patch_len
-        patch_fd.write(patch)
+        patch_fd.write(patch_info.content)
 
         tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
         logger.info(
@@ -999,6 +925,32 @@
       for i in range(s, e):
         assert touched[i] == 1
 
+  def FindSequenceForTransfers(self):
+    """Finds a sequence for the given transfers.
+
+    The goal is to minimize the violation of order dependencies between these
+    transfers, so that fewer blocks are stashed when applying the update.
+    """
+
+    # Clear the existing dependencies between transfers.
+    for xf in self.transfers:
+      xf.goes_before = OrderedDict()
+      xf.goes_after = OrderedDict()
+
+      xf.stash_before = []
+      xf.use_stash = []
+
+    # Find the ordering dependencies among transfers (this is O(n^2)
+    # in the number of transfers).
+    self.GenerateDigraph()
+    # Find a sequence of transfers that satisfies as many ordering
+    # dependencies as possible (heuristically).
+    self.FindVertexSequence()
+    # Fix up the ordering dependencies that the sequence didn't
+    # satisfy.
+    self.ReverseBackwardEdges()
+    self.ImproveVertexSequence()
+
   def ImproveVertexSequence(self):
     logger.info("Improving vertex order...")
 
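The order dependency being rebuilt here: if transfer b writes blocks that transfer a still needs to read, then a must run before b, and every dependency the final sequence violates costs stash space. A toy illustration with plain sets standing in for RangeSets (block numbers hypothetical):

    def must_precede(a_src_blocks, b_tgt_blocks):
        # 'a' reads blocks that 'b' would overwrite, so 'a' goes first;
        # GenerateDigraph records this as an edge, and ReverseBackwardEdges
        # turns each edge the chosen sequence violates into a stash.
        return bool(a_src_blocks & b_tgt_blocks)

    a_src = {10, 11, 12}  # blocks transfer a copies from
    b_tgt = {12, 13}      # blocks transfer b writes to
    assert must_precede(a_src, b_tgt)  # they share block 12
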
@@ -1248,6 +1200,105 @@
           b.goes_before[a] = size
           a.goes_after[b] = size
 
+  def ComputePatchesForInputList(self, diff_queue, compress_target):
+    """Returns a list of patch information for the input list of transfers.
+
+      Args:
+        diff_queue: A list of transfers with style 'diff'.
+        compress_target: If True, compresses the target ranges of each
+            transfer and saves the compressed size.
+
+      Returns:
+        A list of (transfer index, patch_info, compressed_size) tuples.
+    """
+
+    if not diff_queue:
+      return []
+
+    if self.threads > 1:
+      logger.info("Computing patches (using %d threads)...", self.threads)
+    else:
+      logger.info("Computing patches...")
+
+    diff_total = len(diff_queue)
+    patches = [None] * diff_total
+    error_messages = []
+
+    # Using multiprocessing doesn't give additional benefits, due to the
+    # pattern of the code. The diffing work is done by subprocess.call, which
+    # already runs in a separate process (not affected much by the GIL -
+    # Global Interpreter Lock). Using multiprocess also requires either a)
+    # writing the diff input files in the main process before forking, or b)
+    # reopening the image file (SparseImage) in the worker processes. Doing
+    # neither of them further improves the performance.
+    lock = threading.Lock()
+
+    def diff_worker():
+      while True:
+        with lock:
+          if not diff_queue:
+            return
+          xf_index, imgdiff, patch_index = diff_queue.pop()
+          xf = self.transfers[xf_index]
+
+        message = []
+        compressed_size = None
+
+        patch_info = xf.patch_info
+        if not patch_info:
+          src_file = common.MakeTempFile(prefix="src-")
+          with open(src_file, "wb") as fd:
+            self.src.WriteRangeDataToFd(xf.src_ranges, fd)
+
+          tgt_file = common.MakeTempFile(prefix="tgt-")
+          with open(tgt_file, "wb") as fd:
+            self.tgt.WriteRangeDataToFd(xf.tgt_ranges, fd)
+
+          try:
+            patch_info = compute_patch(src_file, tgt_file, imgdiff)
+          except ValueError as e:
+            message.append(
+                "Failed to generate %s for %s: tgt=%s, src=%s:\n%s" % (
+                    "imgdiff" if imgdiff else "bsdiff",
+                    xf.tgt_name if xf.tgt_name == xf.src_name else
+                    xf.tgt_name + " (from " + xf.src_name + ")",
+                    xf.tgt_ranges, xf.src_ranges, e.message))
+
+        if compress_target:
+          tgt_data = self.tgt.ReadRangeSet(xf.tgt_ranges)
+          try:
+            # Compresses with the default level
+            compress_obj = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS)
+            compressed_data = (compress_obj.compress("".join(tgt_data))
+                               + compress_obj.flush())
+            compressed_size = len(compressed_data)
+          except zlib.error as e:
+            message.append(
+                "Failed to compress the data in target range {} for {}:\n"
+                "{}".format(xf.tgt_ranges, xf.tgt_name, e.message))
+
+        if message:
+          with lock:
+            error_messages.extend(message)
+
+        with lock:
+          patches[patch_index] = (xf_index, patch_info, compressed_size)
+
+    threads = [threading.Thread(target=diff_worker)
+               for _ in range(self.threads)]
+    for th in threads:
+      th.start()
+    while threads:
+      threads.pop().join()
+
+    if error_messages:
+      logger.error('ERROR:')
+      logger.error('\n'.join(error_messages))
+      logger.error('\n\n\n')
+      sys.exit(1)
+
+    return patches
+
   def FindTransfers(self):
     """Parse the file_map to generate all the transfers."""
 
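On the compress_target path above, the worker measures how small each target range would be under raw DEFLATE (the negative wbits argument drops the zlib header and checksum, leaving a bare deflate stream). A standalone sketch of that measurement:

    import zlib

    def deflate_size(data, level=6):
        # Same construction as compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS)
        # in diff_worker: a raw DEFLATE stream with no zlib framing.
        obj = zlib.compressobj(level, zlib.DEFLATED, -zlib.MAX_WBITS)
        return len(obj.compress(data) + obj.flush())

    assert deflate_size(b"\x00" * 4096) < 4096  # all-zero blocks compress well
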
@@ -1585,7 +1636,7 @@
                                 self.tgt.RangeSha1(tgt_ranges),
                                 self.src.RangeSha1(src_ranges),
                                 "diff", self.transfers)
-      transfer_split.patch = patch
+      transfer_split.patch_info = PatchInfo(True, patch)
 
   def AbbreviateSourceNames(self):
     for k in self.src.file_map.keys():